Dataset columns (name, type, observed value range):

  problem_id          string   lengths 18 to 22
  source              string   1 distinct value
  task_type           string   1 distinct value
  in_source_id        string   lengths 13 to 58
  prompt              string   lengths 1.71k to 18.9k
  golden_diff         string   lengths 145 to 5.13k
  verification_info   string   lengths 465 to 23.6k
  num_tokens_prompt   int64    556 to 4.1k
  num_tokens_diff     int64    47 to 1.02k
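For orientation, the sketch below shows one way to load this data with the Hugging Face `datasets` library and read the fields listed above. It is a minimal example under stated assumptions: the repository identifier (`rasdani/github-patches`, taken from the `source` values in the rows below) and the `train` split name are assumptions, not confirmed by this dump.

```python
# Minimal sketch, assuming the dataset lives at "rasdani/github-patches"
# (the value of the `source` column) and exposes a "train" split.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")  # assumed repo ID / split

row = ds[0]
print(row["problem_id"])          # e.g. "gh_patches_debug_41603"
print(row["in_source_id"])        # upstream issue, e.g. "liqd__a4-meinberlin-675"
print(row["prompt"][:300])        # issue statement plus partial code base
print(row["golden_diff"][:300])   # reference patch in unified diff format
print(row["num_tokens_prompt"], row["num_tokens_diff"])
```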
problem_id: gh_patches_debug_41603
source: rasdani/github-patches
task_type: git_diff
in_source_id: liqd__a4-meinberlin-675
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Send wagtail form emails asynchronously Currently wagtail form emails are sent synchronously because the `send` attributes are not serializable. see #671 for the temporary fix. Rather then sending sync we should fix the problem and provide the required data as a serializable type </issue> <code> [start of meinberlin/apps/cms/models.py] 1 from django.db import models 2 from modelcluster.fields import ParentalKey 3 from modelcluster.models import ClusterableModel 4 from wagtail.wagtailadmin import edit_handlers 5 from wagtail.wagtailcore import blocks 6 from wagtail.wagtailcore import fields 7 from wagtail.wagtailcore.models import Orderable 8 from wagtail.wagtailcore.models import Page 9 from wagtail.wagtailforms.models import AbstractEmailForm 10 from wagtail.wagtailforms.models import AbstractFormField 11 from wagtail.wagtailimages.edit_handlers import ImageChooserPanel 12 from wagtail.wagtailsnippets.models import register_snippet 13 14 from meinberlin.apps.actions import blocks as actions_blocks 15 16 from . import blocks as cms_blocks 17 from . import emails 18 19 20 class SimplePage(Page): 21 body = fields.RichTextField(blank=True) 22 23 content_panels = [ 24 edit_handlers.FieldPanel('title'), 25 edit_handlers.FieldPanel('body'), 26 ] 27 28 subpage_types = [] 29 30 31 class HomePage(Page): 32 body = fields.StreamField([ 33 ('paragraph', blocks.RichTextBlock( 34 template='meinberlin_cms/blocks/richtext_block.html' 35 )), 36 ('call_to_action', cms_blocks.CallToActionBlock()), 37 ('columns_text', cms_blocks.ColumnsBlock()), 38 ('projects', cms_blocks.ProjectsWrapperBlock()), 39 ('activities', actions_blocks.PlatformActivityBlock()), 40 ]) 41 42 subtitle = models.CharField(max_length=120) 43 44 header_image = models.ForeignKey( 45 'wagtailimages.Image', 46 null=True, 47 blank=False, 48 on_delete=models.SET_NULL, 49 related_name='+' 50 ) 51 52 content_panels = Page.content_panels + [ 53 edit_handlers.FieldPanel('subtitle'), 54 ImageChooserPanel('header_image'), 55 edit_handlers.StreamFieldPanel('body'), 56 ] 57 58 59 class MenuItem(models.Model): 60 title = models.CharField(max_length=255) 61 link_page = models.ForeignKey('wagtailcore.Page') 62 63 @property 64 def url(self): 65 return self.link_page.url 66 67 def __str__(self): 68 return self.title 69 70 panels = [ 71 edit_handlers.FieldPanel('title'), 72 edit_handlers.PageChooserPanel('link_page') 73 ] 74 75 76 @register_snippet 77 class NavigationMenu(ClusterableModel): 78 title = models.CharField(max_length=255, null=False, blank=False) 79 80 def __str__(self): 81 return self.title 82 83 panels = [ 84 edit_handlers.FieldPanel('title'), 85 edit_handlers.InlinePanel('items') 86 ] 87 88 89 class NavigationMenuItem(Orderable, MenuItem): 90 parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items') 91 92 93 class EmailFormField(AbstractFormField): 94 page = ParentalKey('EmailFormPage', related_name='form_fields') 95 96 97 class EmailFormPage(AbstractEmailForm): 98 intro = fields.RichTextField( 99 help_text='Introduction text shown above the form' 100 ) 101 thank_you = fields.RichTextField( 102 help_text='Text shown after form submission', 103 ) 104 email_content = models.CharField( 105 max_length=200, 106 help_text='Email content message', 107 ) 108 attach_as = models.CharField( 109 max_length=3, 110 choices=( 111 ('xls', 'XLSX Document'), 112 ('txt', 'Text'), 113 ), 114 default='xls', 115 help_text='Form results are 
send in this document format', 116 ) 117 118 content_panels = AbstractEmailForm.content_panels + [ 119 edit_handlers.MultiFieldPanel([ 120 edit_handlers.FieldPanel('intro', classname='full'), 121 edit_handlers.FieldPanel('thank_you', classname='full'), 122 ], 'Page'), 123 edit_handlers.MultiFieldPanel([ 124 edit_handlers.FieldPanel('to_address'), 125 edit_handlers.FieldPanel('subject'), 126 edit_handlers.FieldPanel('email_content', classname='full'), 127 edit_handlers.FieldPanel('attach_as'), 128 ], 'Email'), 129 edit_handlers.InlinePanel('form_fields', label='Form fields'), 130 ] 131 132 def send_mail(self, form): 133 self.form = form 134 if self.attach_as == 'xls': 135 emails.XlsxFormEmail.send(self) 136 elif self.attach_as == 'txt': 137 emails.TextFormEmail.send(self) 138 139 @property 140 def field_values(self): 141 fields = {} 142 for field in self.form: 143 value = field.value() 144 if isinstance(value, list): 145 value = ', '.join(value) 146 fields[field.label] = value 147 return fields 148 [end of meinberlin/apps/cms/models.py] [start of meinberlin/apps/cms/emails.py] 1 import io 2 from email.mime.application import MIMEApplication 3 from email.mime.text import MIMEText 4 5 import xlsxwriter 6 from django.utils import timezone 7 from django.utils.translation import ugettext as _ 8 9 from adhocracy4.emails.mixins import SyncEmailMixin 10 from meinberlin.apps.contrib.emails import Email 11 12 13 class FormEmail(SyncEmailMixin, Email): 14 template_name = 'meinberlin_cms/emails/form_submission' 15 16 def get_receivers(self): 17 return [x.strip() for x in self.object.to_address.split(',')] 18 19 20 class XlsxFormEmail(FormEmail): 21 22 def _generate_xlsx(self): 23 stream = io.BytesIO() 24 workbook = xlsxwriter.Workbook(stream, {'in_memory': True}) 25 worksheet = workbook.add_worksheet() 26 27 worksheet.write(0, 0, _('Form Field')) 28 worksheet.write(0, 1, _('Response')) 29 30 rows = self.object.field_values.items() 31 for rownum, row in enumerate(rows, start=1): 32 worksheet.write(rownum, 0, row[0]) 33 worksheet.write(rownum, 1, self._fix_newline_if_string(row[1])) 34 35 workbook.close() 36 return stream.getvalue() 37 38 def _fix_newline_if_string(self, value): 39 if isinstance(value, str): 40 return value.replace('\r', '') 41 return value 42 43 def get_attachments(self): 44 attachments = super().get_attachments() 45 xlsx_data = self._generate_xlsx() 46 mime_doc = MIMEApplication( 47 _data=xlsx_data, 48 _subtype='vnd.openxmlformats-officedocument.spreadsheetml.sheet') 49 timestamp = timezone.now().strftime("%Y-%m-%d") 50 form_title = self.object.title.replace(' ', '_') 51 submission_pk = self.object.get_submission_class().objects.last().pk 52 filename = '{}_{}_{}.xlsx'.format(timestamp, form_title, submission_pk) 53 mime_doc.add_header( 54 'Content-Disposition', 55 'attachment; filename="{}"'.format(filename) 56 ) 57 return attachments + [mime_doc] 58 59 60 class TextFormEmail(FormEmail): 61 62 def get_attachments(self): 63 attachments = super().get_attachments() 64 text = '' 65 for field, value in self.object.field_values.items(): 66 text += '{}:\n{}\n\n'.format(field, value) 67 mime_doc = MIMEText(_text=text, _charset='utf-8') 68 return attachments + [mime_doc] 69 [end of meinberlin/apps/cms/emails.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/cms/emails.py b/meinberlin/apps/cms/emails.py --- a/meinberlin/apps/cms/emails.py +++ b/meinberlin/apps/cms/emails.py @@ -6,15 +6,14 @@ from django.utils import timezone from django.utils.translation import ugettext as _ -from adhocracy4.emails.mixins import SyncEmailMixin from meinberlin.apps.contrib.emails import Email -class FormEmail(SyncEmailMixin, Email): +class FormEmail(Email): template_name = 'meinberlin_cms/emails/form_submission' def get_receivers(self): - return [x.strip() for x in self.object.to_address.split(',')] + return [x.strip() for x in self.kwargs.get('to_addresses')] class XlsxFormEmail(FormEmail): @@ -27,8 +26,8 @@ worksheet.write(0, 0, _('Form Field')) worksheet.write(0, 1, _('Response')) - rows = self.object.field_values.items() - for rownum, row in enumerate(rows, start=1): + field_values = self.kwargs.get('field_values') + for rownum, row in enumerate(field_values.items(), start=1): worksheet.write(rownum, 0, row[0]) worksheet.write(rownum, 1, self._fix_newline_if_string(row[1])) @@ -47,8 +46,8 @@ _data=xlsx_data, _subtype='vnd.openxmlformats-officedocument.spreadsheetml.sheet') timestamp = timezone.now().strftime("%Y-%m-%d") - form_title = self.object.title.replace(' ', '_') - submission_pk = self.object.get_submission_class().objects.last().pk + form_title = self.kwargs.get('title') + submission_pk = self.kwargs.get('submission_pk') filename = '{}_{}_{}.xlsx'.format(timestamp, form_title, submission_pk) mime_doc.add_header( 'Content-Disposition', @@ -62,7 +61,8 @@ def get_attachments(self): attachments = super().get_attachments() text = '' - for field, value in self.object.field_values.items(): + field_values = self.kwargs.get('field_values') + for field, value in field_values.items(): text += '{}:\n{}\n\n'.format(field, value) mime_doc = MIMEText(_text=text, _charset='utf-8') return attachments + [mime_doc] diff --git a/meinberlin/apps/cms/models.py b/meinberlin/apps/cms/models.py --- a/meinberlin/apps/cms/models.py +++ b/meinberlin/apps/cms/models.py @@ -130,16 +130,20 @@ ] def send_mail(self, form): - self.form = form + kwargs = { + 'title': self.title.replace(' ', '_'), + 'to_addresses': self.to_address.split(','), + 'field_values': self.get_field_values(form), + 'submission_pk': self.get_submission_class().objects.last().pk + } if self.attach_as == 'xls': - emails.XlsxFormEmail.send(self) + emails.XlsxFormEmail.send(self, **kwargs) elif self.attach_as == 'txt': - emails.TextFormEmail.send(self) + emails.TextFormEmail.send(self, **kwargs) - @property - def field_values(self): + def get_field_values(self, form): fields = {} - for field in self.form: + for field in form: value = field.value() if isinstance(value, list): value = ', '.join(value)
{"golden_diff": "diff --git a/meinberlin/apps/cms/emails.py b/meinberlin/apps/cms/emails.py\n--- a/meinberlin/apps/cms/emails.py\n+++ b/meinberlin/apps/cms/emails.py\n@@ -6,15 +6,14 @@\n from django.utils import timezone\n from django.utils.translation import ugettext as _\n \n-from adhocracy4.emails.mixins import SyncEmailMixin\n from meinberlin.apps.contrib.emails import Email\n \n \n-class FormEmail(SyncEmailMixin, Email):\n+class FormEmail(Email):\n template_name = 'meinberlin_cms/emails/form_submission'\n \n def get_receivers(self):\n- return [x.strip() for x in self.object.to_address.split(',')]\n+ return [x.strip() for x in self.kwargs.get('to_addresses')]\n \n \n class XlsxFormEmail(FormEmail):\n@@ -27,8 +26,8 @@\n worksheet.write(0, 0, _('Form Field'))\n worksheet.write(0, 1, _('Response'))\n \n- rows = self.object.field_values.items()\n- for rownum, row in enumerate(rows, start=1):\n+ field_values = self.kwargs.get('field_values')\n+ for rownum, row in enumerate(field_values.items(), start=1):\n worksheet.write(rownum, 0, row[0])\n worksheet.write(rownum, 1, self._fix_newline_if_string(row[1]))\n \n@@ -47,8 +46,8 @@\n _data=xlsx_data,\n _subtype='vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n timestamp = timezone.now().strftime(\"%Y-%m-%d\")\n- form_title = self.object.title.replace(' ', '_')\n- submission_pk = self.object.get_submission_class().objects.last().pk\n+ form_title = self.kwargs.get('title')\n+ submission_pk = self.kwargs.get('submission_pk')\n filename = '{}_{}_{}.xlsx'.format(timestamp, form_title, submission_pk)\n mime_doc.add_header(\n 'Content-Disposition',\n@@ -62,7 +61,8 @@\n def get_attachments(self):\n attachments = super().get_attachments()\n text = ''\n- for field, value in self.object.field_values.items():\n+ field_values = self.kwargs.get('field_values')\n+ for field, value in field_values.items():\n text += '{}:\\n{}\\n\\n'.format(field, value)\n mime_doc = MIMEText(_text=text, _charset='utf-8')\n return attachments + [mime_doc]\ndiff --git a/meinberlin/apps/cms/models.py b/meinberlin/apps/cms/models.py\n--- a/meinberlin/apps/cms/models.py\n+++ b/meinberlin/apps/cms/models.py\n@@ -130,16 +130,20 @@\n ]\n \n def send_mail(self, form):\n- self.form = form\n+ kwargs = {\n+ 'title': self.title.replace(' ', '_'),\n+ 'to_addresses': self.to_address.split(','),\n+ 'field_values': self.get_field_values(form),\n+ 'submission_pk': self.get_submission_class().objects.last().pk\n+ }\n if self.attach_as == 'xls':\n- emails.XlsxFormEmail.send(self)\n+ emails.XlsxFormEmail.send(self, **kwargs)\n elif self.attach_as == 'txt':\n- emails.TextFormEmail.send(self)\n+ emails.TextFormEmail.send(self, **kwargs)\n \n- @property\n- def field_values(self):\n+ def get_field_values(self, form):\n fields = {}\n- for field in self.form:\n+ for field in form:\n value = field.value()\n if isinstance(value, list):\n value = ', '.join(value)\n", "issue": "Send wagtail form emails asynchronously \nCurrently wagtail form emails are sent synchronously because the `send` attributes are not serializable. 
see #671 for the temporary fix.\r\nRather then sending sync we should fix the problem and provide the required data as a serializable type\n", "before_files": [{"content": "from django.db import models\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.wagtailadmin import edit_handlers\nfrom wagtail.wagtailcore import blocks\nfrom wagtail.wagtailcore import fields\nfrom wagtail.wagtailcore.models import Orderable\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailforms.models import AbstractEmailForm\nfrom wagtail.wagtailforms.models import AbstractFormField\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailsnippets.models import register_snippet\n\nfrom meinberlin.apps.actions import blocks as actions_blocks\n\nfrom . import blocks as cms_blocks\nfrom . import emails\n\n\nclass SimplePage(Page):\n body = fields.RichTextField(blank=True)\n\n content_panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.FieldPanel('body'),\n ]\n\n subpage_types = []\n\n\nclass HomePage(Page):\n body = fields.StreamField([\n ('paragraph', blocks.RichTextBlock(\n template='meinberlin_cms/blocks/richtext_block.html'\n )),\n ('call_to_action', cms_blocks.CallToActionBlock()),\n ('columns_text', cms_blocks.ColumnsBlock()),\n ('projects', cms_blocks.ProjectsWrapperBlock()),\n ('activities', actions_blocks.PlatformActivityBlock()),\n ])\n\n subtitle = models.CharField(max_length=120)\n\n header_image = models.ForeignKey(\n 'wagtailimages.Image',\n null=True,\n blank=False,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n content_panels = Page.content_panels + [\n edit_handlers.FieldPanel('subtitle'),\n ImageChooserPanel('header_image'),\n edit_handlers.StreamFieldPanel('body'),\n ]\n\n\nclass MenuItem(models.Model):\n title = models.CharField(max_length=255)\n link_page = models.ForeignKey('wagtailcore.Page')\n\n @property\n def url(self):\n return self.link_page.url\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.PageChooserPanel('link_page')\n ]\n\n\n@register_snippet\nclass NavigationMenu(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n\n def __str__(self):\n return self.title\n\n panels = [\n edit_handlers.FieldPanel('title'),\n edit_handlers.InlinePanel('items')\n ]\n\n\nclass NavigationMenuItem(Orderable, MenuItem):\n parent = ParentalKey('meinberlin_cms.NavigationMenu', related_name='items')\n\n\nclass EmailFormField(AbstractFormField):\n page = ParentalKey('EmailFormPage', related_name='form_fields')\n\n\nclass EmailFormPage(AbstractEmailForm):\n intro = fields.RichTextField(\n help_text='Introduction text shown above the form'\n )\n thank_you = fields.RichTextField(\n help_text='Text shown after form submission',\n )\n email_content = models.CharField(\n max_length=200,\n help_text='Email content message',\n )\n attach_as = models.CharField(\n max_length=3,\n choices=(\n ('xls', 'XLSX Document'),\n ('txt', 'Text'),\n ),\n default='xls',\n help_text='Form results are send in this document format',\n )\n\n content_panels = AbstractEmailForm.content_panels + [\n edit_handlers.MultiFieldPanel([\n edit_handlers.FieldPanel('intro', classname='full'),\n edit_handlers.FieldPanel('thank_you', classname='full'),\n ], 'Page'),\n edit_handlers.MultiFieldPanel([\n edit_handlers.FieldPanel('to_address'),\n edit_handlers.FieldPanel('subject'),\n edit_handlers.FieldPanel('email_content', 
classname='full'),\n edit_handlers.FieldPanel('attach_as'),\n ], 'Email'),\n edit_handlers.InlinePanel('form_fields', label='Form fields'),\n ]\n\n def send_mail(self, form):\n self.form = form\n if self.attach_as == 'xls':\n emails.XlsxFormEmail.send(self)\n elif self.attach_as == 'txt':\n emails.TextFormEmail.send(self)\n\n @property\n def field_values(self):\n fields = {}\n for field in self.form:\n value = field.value()\n if isinstance(value, list):\n value = ', '.join(value)\n fields[field.label] = value\n return fields\n", "path": "meinberlin/apps/cms/models.py"}, {"content": "import io\nfrom email.mime.application import MIMEApplication\nfrom email.mime.text import MIMEText\n\nimport xlsxwriter\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\n\nfrom adhocracy4.emails.mixins import SyncEmailMixin\nfrom meinberlin.apps.contrib.emails import Email\n\n\nclass FormEmail(SyncEmailMixin, Email):\n template_name = 'meinberlin_cms/emails/form_submission'\n\n def get_receivers(self):\n return [x.strip() for x in self.object.to_address.split(',')]\n\n\nclass XlsxFormEmail(FormEmail):\n\n def _generate_xlsx(self):\n stream = io.BytesIO()\n workbook = xlsxwriter.Workbook(stream, {'in_memory': True})\n worksheet = workbook.add_worksheet()\n\n worksheet.write(0, 0, _('Form Field'))\n worksheet.write(0, 1, _('Response'))\n\n rows = self.object.field_values.items()\n for rownum, row in enumerate(rows, start=1):\n worksheet.write(rownum, 0, row[0])\n worksheet.write(rownum, 1, self._fix_newline_if_string(row[1]))\n\n workbook.close()\n return stream.getvalue()\n\n def _fix_newline_if_string(self, value):\n if isinstance(value, str):\n return value.replace('\\r', '')\n return value\n\n def get_attachments(self):\n attachments = super().get_attachments()\n xlsx_data = self._generate_xlsx()\n mime_doc = MIMEApplication(\n _data=xlsx_data,\n _subtype='vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n timestamp = timezone.now().strftime(\"%Y-%m-%d\")\n form_title = self.object.title.replace(' ', '_')\n submission_pk = self.object.get_submission_class().objects.last().pk\n filename = '{}_{}_{}.xlsx'.format(timestamp, form_title, submission_pk)\n mime_doc.add_header(\n 'Content-Disposition',\n 'attachment; filename=\"{}\"'.format(filename)\n )\n return attachments + [mime_doc]\n\n\nclass TextFormEmail(FormEmail):\n\n def get_attachments(self):\n attachments = super().get_attachments()\n text = ''\n for field, value in self.object.field_values.items():\n text += '{}:\\n{}\\n\\n'.format(field, value)\n mime_doc = MIMEText(_text=text, _charset='utf-8')\n return attachments + [mime_doc]\n", "path": "meinberlin/apps/cms/emails.py"}]}
num_tokens_prompt: 2,562
num_tokens_diff: 814
problem_id: gh_patches_debug_39538
source: rasdani/github-patches
task_type: git_diff
in_source_id: rotki__rotki-4037
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> User already exists during setup (DAppNode) ## Problem Definition Using DAppNode Package: 1. Install Rotki 2. Setup Rotki using Premium API Key with restore sync'd database option 3. Enter incorrect password 4. Get error that password could not decrypt database 5. Go back 6. Enter correct password 7. "User $USERNAME already exists. User data dir: /data/$USERNAME" Can't proceed...have to delete and re-create Rotki package ## Logs None ### System Description Operating system: DAppNode Rotki version: DAppNode Package `0.1.9 (v1.23.1 upstream)` </issue> <code> [start of rotkehlchen/premium/sync.py] 1 import base64 2 import logging 3 import shutil 4 from enum import Enum 5 from typing import Any, Dict, NamedTuple, Optional, Tuple 6 7 from typing_extensions import Literal 8 9 from rotkehlchen.data_handler import DataHandler 10 from rotkehlchen.errors import ( 11 PremiumAuthenticationError, 12 RemoteError, 13 RotkehlchenPermissionError, 14 UnableToDecryptRemoteData, 15 ) 16 from rotkehlchen.logging import RotkehlchenLogsAdapter 17 from rotkehlchen.premium.premium import Premium, PremiumCredentials, premium_create_and_verify 18 from rotkehlchen.utils.misc import ts_now 19 20 logger = logging.getLogger(__name__) 21 log = RotkehlchenLogsAdapter(logger) 22 23 24 class CanSync(Enum): 25 YES = 0 26 NO = 1 27 ASK_USER = 2 28 29 30 class SyncCheckResult(NamedTuple): 31 # The result of the sync check 32 can_sync: CanSync 33 # If result is ASK_USER, what should the message be? 34 message: str 35 payload: Optional[Dict[str, Any]] 36 37 38 class PremiumSyncManager(): 39 40 def __init__(self, data: DataHandler, password: str) -> None: 41 # Initialize this with the value saved in the DB 42 self.last_data_upload_ts = data.db.get_last_data_upload_ts() 43 self.data = data 44 self.password = password 45 self.premium: Optional[Premium] = None 46 47 def _can_sync_data_from_server(self, new_account: bool) -> SyncCheckResult: 48 """ 49 Checks if the remote data can be pulled from the server. 50 51 Returns a SyncCheckResult denoting whether we can pull for sure, 52 whether we can't pull or whether the user should be asked. 
If the user 53 should be asked a message is also returned 54 """ 55 log.debug('can sync data from server -- start') 56 if self.premium is None: 57 return SyncCheckResult(can_sync=CanSync.NO, message='', payload=None) 58 59 b64_encoded_data, our_hash = self.data.compress_and_encrypt_db(self.password) 60 61 try: 62 metadata = self.premium.query_last_data_metadata() 63 except RemoteError as e: 64 log.debug('can sync data from server failed', error=str(e)) 65 return SyncCheckResult(can_sync=CanSync.NO, message='', payload=None) 66 67 if new_account: 68 return SyncCheckResult(can_sync=CanSync.YES, message='', payload=None) 69 70 if not self.data.db.get_premium_sync(): 71 # If it's not a new account and the db setting for premium syncing is off stop 72 return SyncCheckResult(can_sync=CanSync.NO, message='', payload=None) 73 74 log.debug( 75 'CAN_PULL', 76 ours=our_hash, 77 theirs=metadata.data_hash, 78 ) 79 if our_hash == metadata.data_hash: 80 log.debug('sync from server stopped -- same hash') 81 # same hash -- no need to get anything 82 return SyncCheckResult(can_sync=CanSync.NO, message='', payload=None) 83 84 our_last_write_ts = self.data.db.get_last_write_ts() 85 data_bytes_size = len(base64.b64decode(b64_encoded_data)) 86 87 local_more_recent = our_last_write_ts >= metadata.last_modify_ts 88 89 if local_more_recent: 90 log.debug('sync from server stopped -- local is newer') 91 return SyncCheckResult(can_sync=CanSync.NO, message='', payload=None) 92 93 # else remote is bigger 94 return SyncCheckResult( 95 can_sync=CanSync.ASK_USER, 96 message='Detected remote database with bigger size than the local one. ', 97 payload={ 98 'local_size': data_bytes_size, 99 'remote_size': metadata.data_size, 100 'local_last_modified': our_last_write_ts, 101 'remote_last_modified': metadata.last_modify_ts, 102 }, 103 ) 104 105 def _sync_data_from_server_and_replace_local(self) -> Tuple[bool, str]: 106 """ 107 Performs syncing of data from server and replaces local db 108 109 Returns true for success and False for error/failure 110 111 May raise: 112 - PremiumAuthenticationError due to an UnableToDecryptRemoteData 113 coming from decompress_and_decrypt_db. This happens when the given password 114 does not match the one on the saved DB. 115 """ 116 if self.premium is None: 117 return False, 'Pulling failed. User does not have active premium.' 118 119 try: 120 result = self.premium.pull_data() 121 except RemoteError as e: 122 log.debug('sync from server -- pulling failed.', error=str(e)) 123 return False, f'Pulling failed: {str(e)}' 124 125 if result['data'] is None: 126 log.debug('sync from server -- no data found.') 127 return False, 'No data found' 128 129 try: 130 self.data.decompress_and_decrypt_db(self.password, result['data']) 131 except UnableToDecryptRemoteData as e: 132 raise PremiumAuthenticationError( 133 'The given password can not unlock the database that was retrieved from ' 134 'the server. 
Make sure to use the same password as when the account was created.', 135 ) from e 136 137 return True, '' 138 139 def maybe_upload_data_to_server(self, force_upload: bool = False) -> bool: 140 # if user has no premium do nothing 141 if self.premium is None: 142 return False 143 144 if not self.data.db.get_premium_sync() and not force_upload: 145 return False 146 147 # upload only once per hour 148 diff = ts_now() - self.last_data_upload_ts 149 if diff < 3600 and not force_upload: 150 return False 151 152 try: 153 metadata = self.premium.query_last_data_metadata() 154 except RemoteError as e: 155 log.debug('upload to server -- fetching metadata error', error=str(e)) 156 return False 157 b64_encoded_data, our_hash = self.data.compress_and_encrypt_db(self.password) 158 159 log.debug( 160 'CAN_PUSH', 161 ours=our_hash, 162 theirs=metadata.data_hash, 163 ) 164 if our_hash == metadata.data_hash and not force_upload: 165 log.debug('upload to server stopped -- same hash') 166 # same hash -- no need to upload anything 167 return False 168 169 our_last_write_ts = self.data.db.get_last_write_ts() 170 if our_last_write_ts <= metadata.last_modify_ts and not force_upload: 171 # Server's DB was modified after our local DB 172 log.debug( 173 f'upload to server stopped -- remote db({metadata.last_modify_ts}) ' 174 f'more recent than local({our_last_write_ts})', 175 ) 176 return False 177 178 data_bytes_size = len(base64.b64decode(b64_encoded_data)) 179 if data_bytes_size < metadata.data_size and not force_upload: 180 # Let's be conservative. 181 # TODO: Here perhaps prompt user in the future 182 log.debug( 183 f'upload to server stopped -- remote db({metadata.data_size}) ' 184 f'bigger than local({data_bytes_size})', 185 ) 186 return False 187 188 try: 189 self.premium.upload_data( 190 data_blob=b64_encoded_data, 191 our_hash=our_hash, 192 last_modify_ts=our_last_write_ts, 193 compression_type='zlib', 194 ) 195 except RemoteError as e: 196 log.debug('upload to server -- upload error', error=str(e)) 197 return False 198 199 # update the last data upload value 200 self.last_data_upload_ts = ts_now() 201 self.data.db.update_last_data_upload_ts(self.last_data_upload_ts) 202 log.debug('upload to server -- success') 203 return True 204 205 def sync_data(self, action: Literal['upload', 'download']) -> Tuple[bool, str]: 206 msg = '' 207 208 if action == 'upload': 209 success = self.maybe_upload_data_to_server(force_upload=True) 210 211 if not success: 212 msg = 'Upload failed' 213 return success, msg 214 215 return self._sync_data_from_server_and_replace_local() 216 217 def _sync_if_allowed( 218 self, 219 sync_approval: Literal['yes', 'no', 'unknown'], 220 result: SyncCheckResult, 221 ) -> None: 222 if result.can_sync == CanSync.ASK_USER: 223 if sync_approval == 'unknown': 224 log.info('Remote DB is possibly newer. 
Ask user.') 225 raise RotkehlchenPermissionError(result.message, result.payload) 226 227 if sync_approval == 'yes': 228 log.info('User approved data sync from server') 229 # this may raise due to password 230 self._sync_data_from_server_and_replace_local() 231 232 else: 233 log.debug('Could sync data from server but user refused') 234 elif result.can_sync == CanSync.YES: 235 log.info('User approved data sync from server') 236 self._sync_data_from_server_and_replace_local() # this may raise due to password 237 238 def try_premium_at_start( 239 self, 240 given_premium_credentials: Optional[PremiumCredentials], 241 username: str, 242 create_new: bool, 243 sync_approval: Literal['yes', 'no', 'unknown'], 244 sync_database: bool, 245 ) -> Optional[Premium]: 246 """ 247 Check if new user provided api pair or we already got one in the DB 248 249 Returns the created premium if user's premium credentials were fine. 250 251 If not it will raise PremiumAuthenticationError. 252 253 If no credentials were given it returns None 254 """ 255 256 if given_premium_credentials is not None: 257 assert create_new, 'We should never get here for an already existing account' 258 259 try: 260 self.premium = premium_create_and_verify(given_premium_credentials) 261 except PremiumAuthenticationError as e: 262 log.error('Given API key is invalid') 263 # At this point we are at a new user trying to create an account with 264 # premium API keys and we failed. But a directory was created. Remove it. 265 # But create a backup of it in case something went really wrong 266 # and the directory contained data we did not want to lose 267 shutil.move( 268 self.data.user_data_dir, # type: ignore 269 self.data.data_directory / f'auto_backup_{username}_{ts_now()}', 270 ) 271 raise PremiumAuthenticationError( 272 'Could not verify keys for the new account. ' 273 '{}'.format(str(e)), 274 ) from e 275 276 # else, if we got premium data in the DB initialize it and try to sync with the server 277 db_credentials = self.data.db.get_rotkehlchen_premium() 278 if db_credentials: 279 assert not create_new, 'We should never get here for a new account' 280 try: 281 self.premium = premium_create_and_verify(db_credentials) 282 except PremiumAuthenticationError as e: 283 message = ( 284 f'Could not authenticate with the rotkehlchen server with ' 285 f'the API keys found in the Database. Error: {str(e)}' 286 ) 287 log.error(message) 288 raise PremiumAuthenticationError(message) from e 289 290 if self.premium is None: 291 return None 292 293 result = self._can_sync_data_from_server(new_account=create_new) 294 if create_new: 295 # if this is a new account, make sure the api keys are properly stored 296 # in the DB 297 if sync_database: 298 self._sync_if_allowed(sync_approval, result) 299 self.data.db.set_rotkehlchen_premium(self.premium.credentials) 300 else: 301 self._sync_if_allowed(sync_approval, result) 302 303 # Success, return premium 304 return self.premium 305 [end of rotkehlchen/premium/sync.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rotkehlchen/premium/sync.py b/rotkehlchen/premium/sync.py --- a/rotkehlchen/premium/sync.py +++ b/rotkehlchen/premium/sync.py @@ -235,6 +235,23 @@ log.info('User approved data sync from server') self._sync_data_from_server_and_replace_local() # this may raise due to password + def _abort_new_syncing_premium_user( + self, + username: str, + original_exception: PremiumAuthenticationError, + ) -> None: + """At this point we are at a new user trying to create an account with + premium API keys and we failed. But a directory was created. Remove it. + But create a backup of it in case something went really wrong + and the directory contained data we did not want to lose""" + shutil.move( + self.data.user_data_dir, # type: ignore + self.data.data_directory / f'auto_backup_{username}_{ts_now()}', + ) + raise PremiumAuthenticationError( + f'Could not verify keys for the new account. {str(original_exception)}', # noqa: E501 + ) from original_exception + def try_premium_at_start( self, given_premium_credentials: Optional[PremiumCredentials], @@ -259,19 +276,7 @@ try: self.premium = premium_create_and_verify(given_premium_credentials) except PremiumAuthenticationError as e: - log.error('Given API key is invalid') - # At this point we are at a new user trying to create an account with - # premium API keys and we failed. But a directory was created. Remove it. - # But create a backup of it in case something went really wrong - # and the directory contained data we did not want to lose - shutil.move( - self.data.user_data_dir, # type: ignore - self.data.data_directory / f'auto_backup_{username}_{ts_now()}', - ) - raise PremiumAuthenticationError( - 'Could not verify keys for the new account. ' - '{}'.format(str(e)), - ) from e + self._abort_new_syncing_premium_user(username=username, original_exception=e) # else, if we got premium data in the DB initialize it and try to sync with the server db_credentials = self.data.db.get_rotkehlchen_premium() @@ -295,7 +300,11 @@ # if this is a new account, make sure the api keys are properly stored # in the DB if sync_database: - self._sync_if_allowed(sync_approval, result) + try: + self._sync_if_allowed(sync_approval, result) + except PremiumAuthenticationError as e: + self._abort_new_syncing_premium_user(username=username, original_exception=e) + self.data.db.set_rotkehlchen_premium(self.premium.credentials) else: self._sync_if_allowed(sync_approval, result)
{"golden_diff": "diff --git a/rotkehlchen/premium/sync.py b/rotkehlchen/premium/sync.py\n--- a/rotkehlchen/premium/sync.py\n+++ b/rotkehlchen/premium/sync.py\n@@ -235,6 +235,23 @@\n log.info('User approved data sync from server')\n self._sync_data_from_server_and_replace_local() # this may raise due to password\n \n+ def _abort_new_syncing_premium_user(\n+ self,\n+ username: str,\n+ original_exception: PremiumAuthenticationError,\n+ ) -> None:\n+ \"\"\"At this point we are at a new user trying to create an account with\n+ premium API keys and we failed. But a directory was created. Remove it.\n+ But create a backup of it in case something went really wrong\n+ and the directory contained data we did not want to lose\"\"\"\n+ shutil.move(\n+ self.data.user_data_dir, # type: ignore\n+ self.data.data_directory / f'auto_backup_{username}_{ts_now()}',\n+ )\n+ raise PremiumAuthenticationError(\n+ f'Could not verify keys for the new account. {str(original_exception)}', # noqa: E501\n+ ) from original_exception\n+\n def try_premium_at_start(\n self,\n given_premium_credentials: Optional[PremiumCredentials],\n@@ -259,19 +276,7 @@\n try:\n self.premium = premium_create_and_verify(given_premium_credentials)\n except PremiumAuthenticationError as e:\n- log.error('Given API key is invalid')\n- # At this point we are at a new user trying to create an account with\n- # premium API keys and we failed. But a directory was created. Remove it.\n- # But create a backup of it in case something went really wrong\n- # and the directory contained data we did not want to lose\n- shutil.move(\n- self.data.user_data_dir, # type: ignore\n- self.data.data_directory / f'auto_backup_{username}_{ts_now()}',\n- )\n- raise PremiumAuthenticationError(\n- 'Could not verify keys for the new account. '\n- '{}'.format(str(e)),\n- ) from e\n+ self._abort_new_syncing_premium_user(username=username, original_exception=e)\n \n # else, if we got premium data in the DB initialize it and try to sync with the server\n db_credentials = self.data.db.get_rotkehlchen_premium()\n@@ -295,7 +300,11 @@\n # if this is a new account, make sure the api keys are properly stored\n # in the DB\n if sync_database:\n- self._sync_if_allowed(sync_approval, result)\n+ try:\n+ self._sync_if_allowed(sync_approval, result)\n+ except PremiumAuthenticationError as e:\n+ self._abort_new_syncing_premium_user(username=username, original_exception=e)\n+\n self.data.db.set_rotkehlchen_premium(self.premium.credentials)\n else:\n self._sync_if_allowed(sync_approval, result)\n", "issue": "User already exists during setup (DAppNode)\n## Problem Definition\r\n\r\nUsing DAppNode Package:\r\n\r\n1. Install Rotki\r\n2. Setup Rotki using Premium API Key with restore sync'd database option\r\n3. Enter incorrect password\r\n4. Get error that password could not decrypt database\r\n5. Go back\r\n6. Enter correct password\r\n7. \"User $USERNAME already exists. 
User data dir: /data/$USERNAME\"\r\n\r\nCan't proceed...have to delete and re-create Rotki package\r\n\r\n## Logs\r\n\r\nNone\r\n\r\n### System Description\r\n\r\nOperating system: DAppNode\r\nRotki version: DAppNode Package `0.1.9 (v1.23.1 upstream)`\r\n\n", "before_files": [{"content": "import base64\nimport logging\nimport shutil\nfrom enum import Enum\nfrom typing import Any, Dict, NamedTuple, Optional, Tuple\n\nfrom typing_extensions import Literal\n\nfrom rotkehlchen.data_handler import DataHandler\nfrom rotkehlchen.errors import (\n PremiumAuthenticationError,\n RemoteError,\n RotkehlchenPermissionError,\n UnableToDecryptRemoteData,\n)\nfrom rotkehlchen.logging import RotkehlchenLogsAdapter\nfrom rotkehlchen.premium.premium import Premium, PremiumCredentials, premium_create_and_verify\nfrom rotkehlchen.utils.misc import ts_now\n\nlogger = logging.getLogger(__name__)\nlog = RotkehlchenLogsAdapter(logger)\n\n\nclass CanSync(Enum):\n YES = 0\n NO = 1\n ASK_USER = 2\n\n\nclass SyncCheckResult(NamedTuple):\n # The result of the sync check\n can_sync: CanSync\n # If result is ASK_USER, what should the message be?\n message: str\n payload: Optional[Dict[str, Any]]\n\n\nclass PremiumSyncManager():\n\n def __init__(self, data: DataHandler, password: str) -> None:\n # Initialize this with the value saved in the DB\n self.last_data_upload_ts = data.db.get_last_data_upload_ts()\n self.data = data\n self.password = password\n self.premium: Optional[Premium] = None\n\n def _can_sync_data_from_server(self, new_account: bool) -> SyncCheckResult:\n \"\"\"\n Checks if the remote data can be pulled from the server.\n\n Returns a SyncCheckResult denoting whether we can pull for sure,\n whether we can't pull or whether the user should be asked. If the user\n should be asked a message is also returned\n \"\"\"\n log.debug('can sync data from server -- start')\n if self.premium is None:\n return SyncCheckResult(can_sync=CanSync.NO, message='', payload=None)\n\n b64_encoded_data, our_hash = self.data.compress_and_encrypt_db(self.password)\n\n try:\n metadata = self.premium.query_last_data_metadata()\n except RemoteError as e:\n log.debug('can sync data from server failed', error=str(e))\n return SyncCheckResult(can_sync=CanSync.NO, message='', payload=None)\n\n if new_account:\n return SyncCheckResult(can_sync=CanSync.YES, message='', payload=None)\n\n if not self.data.db.get_premium_sync():\n # If it's not a new account and the db setting for premium syncing is off stop\n return SyncCheckResult(can_sync=CanSync.NO, message='', payload=None)\n\n log.debug(\n 'CAN_PULL',\n ours=our_hash,\n theirs=metadata.data_hash,\n )\n if our_hash == metadata.data_hash:\n log.debug('sync from server stopped -- same hash')\n # same hash -- no need to get anything\n return SyncCheckResult(can_sync=CanSync.NO, message='', payload=None)\n\n our_last_write_ts = self.data.db.get_last_write_ts()\n data_bytes_size = len(base64.b64decode(b64_encoded_data))\n\n local_more_recent = our_last_write_ts >= metadata.last_modify_ts\n\n if local_more_recent:\n log.debug('sync from server stopped -- local is newer')\n return SyncCheckResult(can_sync=CanSync.NO, message='', payload=None)\n\n # else remote is bigger\n return SyncCheckResult(\n can_sync=CanSync.ASK_USER,\n message='Detected remote database with bigger size than the local one. 
',\n payload={\n 'local_size': data_bytes_size,\n 'remote_size': metadata.data_size,\n 'local_last_modified': our_last_write_ts,\n 'remote_last_modified': metadata.last_modify_ts,\n },\n )\n\n def _sync_data_from_server_and_replace_local(self) -> Tuple[bool, str]:\n \"\"\"\n Performs syncing of data from server and replaces local db\n\n Returns true for success and False for error/failure\n\n May raise:\n - PremiumAuthenticationError due to an UnableToDecryptRemoteData\n coming from decompress_and_decrypt_db. This happens when the given password\n does not match the one on the saved DB.\n \"\"\"\n if self.premium is None:\n return False, 'Pulling failed. User does not have active premium.'\n\n try:\n result = self.premium.pull_data()\n except RemoteError as e:\n log.debug('sync from server -- pulling failed.', error=str(e))\n return False, f'Pulling failed: {str(e)}'\n\n if result['data'] is None:\n log.debug('sync from server -- no data found.')\n return False, 'No data found'\n\n try:\n self.data.decompress_and_decrypt_db(self.password, result['data'])\n except UnableToDecryptRemoteData as e:\n raise PremiumAuthenticationError(\n 'The given password can not unlock the database that was retrieved from '\n 'the server. Make sure to use the same password as when the account was created.',\n ) from e\n\n return True, ''\n\n def maybe_upload_data_to_server(self, force_upload: bool = False) -> bool:\n # if user has no premium do nothing\n if self.premium is None:\n return False\n\n if not self.data.db.get_premium_sync() and not force_upload:\n return False\n\n # upload only once per hour\n diff = ts_now() - self.last_data_upload_ts\n if diff < 3600 and not force_upload:\n return False\n\n try:\n metadata = self.premium.query_last_data_metadata()\n except RemoteError as e:\n log.debug('upload to server -- fetching metadata error', error=str(e))\n return False\n b64_encoded_data, our_hash = self.data.compress_and_encrypt_db(self.password)\n\n log.debug(\n 'CAN_PUSH',\n ours=our_hash,\n theirs=metadata.data_hash,\n )\n if our_hash == metadata.data_hash and not force_upload:\n log.debug('upload to server stopped -- same hash')\n # same hash -- no need to upload anything\n return False\n\n our_last_write_ts = self.data.db.get_last_write_ts()\n if our_last_write_ts <= metadata.last_modify_ts and not force_upload:\n # Server's DB was modified after our local DB\n log.debug(\n f'upload to server stopped -- remote db({metadata.last_modify_ts}) '\n f'more recent than local({our_last_write_ts})',\n )\n return False\n\n data_bytes_size = len(base64.b64decode(b64_encoded_data))\n if data_bytes_size < metadata.data_size and not force_upload:\n # Let's be conservative.\n # TODO: Here perhaps prompt user in the future\n log.debug(\n f'upload to server stopped -- remote db({metadata.data_size}) '\n f'bigger than local({data_bytes_size})',\n )\n return False\n\n try:\n self.premium.upload_data(\n data_blob=b64_encoded_data,\n our_hash=our_hash,\n last_modify_ts=our_last_write_ts,\n compression_type='zlib',\n )\n except RemoteError as e:\n log.debug('upload to server -- upload error', error=str(e))\n return False\n\n # update the last data upload value\n self.last_data_upload_ts = ts_now()\n self.data.db.update_last_data_upload_ts(self.last_data_upload_ts)\n log.debug('upload to server -- success')\n return True\n\n def sync_data(self, action: Literal['upload', 'download']) -> Tuple[bool, str]:\n msg = ''\n\n if action == 'upload':\n success = self.maybe_upload_data_to_server(force_upload=True)\n\n if not 
success:\n msg = 'Upload failed'\n return success, msg\n\n return self._sync_data_from_server_and_replace_local()\n\n def _sync_if_allowed(\n self,\n sync_approval: Literal['yes', 'no', 'unknown'],\n result: SyncCheckResult,\n ) -> None:\n if result.can_sync == CanSync.ASK_USER:\n if sync_approval == 'unknown':\n log.info('Remote DB is possibly newer. Ask user.')\n raise RotkehlchenPermissionError(result.message, result.payload)\n\n if sync_approval == 'yes':\n log.info('User approved data sync from server')\n # this may raise due to password\n self._sync_data_from_server_and_replace_local()\n\n else:\n log.debug('Could sync data from server but user refused')\n elif result.can_sync == CanSync.YES:\n log.info('User approved data sync from server')\n self._sync_data_from_server_and_replace_local() # this may raise due to password\n\n def try_premium_at_start(\n self,\n given_premium_credentials: Optional[PremiumCredentials],\n username: str,\n create_new: bool,\n sync_approval: Literal['yes', 'no', 'unknown'],\n sync_database: bool,\n ) -> Optional[Premium]:\n \"\"\"\n Check if new user provided api pair or we already got one in the DB\n\n Returns the created premium if user's premium credentials were fine.\n\n If not it will raise PremiumAuthenticationError.\n\n If no credentials were given it returns None\n \"\"\"\n\n if given_premium_credentials is not None:\n assert create_new, 'We should never get here for an already existing account'\n\n try:\n self.premium = premium_create_and_verify(given_premium_credentials)\n except PremiumAuthenticationError as e:\n log.error('Given API key is invalid')\n # At this point we are at a new user trying to create an account with\n # premium API keys and we failed. But a directory was created. Remove it.\n # But create a backup of it in case something went really wrong\n # and the directory contained data we did not want to lose\n shutil.move(\n self.data.user_data_dir, # type: ignore\n self.data.data_directory / f'auto_backup_{username}_{ts_now()}',\n )\n raise PremiumAuthenticationError(\n 'Could not verify keys for the new account. '\n '{}'.format(str(e)),\n ) from e\n\n # else, if we got premium data in the DB initialize it and try to sync with the server\n db_credentials = self.data.db.get_rotkehlchen_premium()\n if db_credentials:\n assert not create_new, 'We should never get here for a new account'\n try:\n self.premium = premium_create_and_verify(db_credentials)\n except PremiumAuthenticationError as e:\n message = (\n f'Could not authenticate with the rotkehlchen server with '\n f'the API keys found in the Database. Error: {str(e)}'\n )\n log.error(message)\n raise PremiumAuthenticationError(message) from e\n\n if self.premium is None:\n return None\n\n result = self._can_sync_data_from_server(new_account=create_new)\n if create_new:\n # if this is a new account, make sure the api keys are properly stored\n # in the DB\n if sync_database:\n self._sync_if_allowed(sync_approval, result)\n self.data.db.set_rotkehlchen_premium(self.premium.credentials)\n else:\n self._sync_if_allowed(sync_approval, result)\n\n # Success, return premium\n return self.premium\n", "path": "rotkehlchen/premium/sync.py"}]}
3,952
687
gh_patches_debug_18243
rasdani/github-patches
git_diff
plone__Products.CMFPlone-1465
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CSS bundles generation breaks background images relative urls This is a bug related to PR #1300. </issue> <code> [start of Products/CMFPlone/resources/browser/combine.py] 1 from zExceptions import NotFound 2 from Acquisition import aq_base 3 from datetime import datetime 4 from plone.registry.interfaces import IRegistry 5 from plone.resource.file import FilesystemFile 6 from plone.resource.interfaces import IResourceDirectory 7 from Products.CMFPlone.interfaces import IBundleRegistry 8 from Products.CMFPlone.interfaces.resources import ( 9 OVERRIDE_RESOURCE_DIRECTORY_NAME, 10 ) 11 from StringIO import StringIO 12 from zope.component import getUtility 13 from zope.component import queryUtility 14 15 PRODUCTION_RESOURCE_DIRECTORY = "production" 16 17 18 def get_production_resource_directory(): 19 persistent_directory = queryUtility(IResourceDirectory, name="persistent") 20 if persistent_directory is None: 21 return '' 22 container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME] 23 try: 24 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY] 25 except NotFound: 26 return "%s/++unique++1" % PRODUCTION_RESOURCE_DIRECTORY 27 timestamp = production_folder.readFile('timestamp.txt') 28 return "%s/++unique++%s" % ( 29 PRODUCTION_RESOURCE_DIRECTORY, timestamp) 30 31 32 def get_resource(context, path): 33 resource = context.unrestrictedTraverse(path) 34 if isinstance(resource, FilesystemFile): 35 (directory, sep, filename) = path.rpartition('/') 36 return context.unrestrictedTraverse(directory).readFile(filename) 37 else: 38 if hasattr(aq_base(resource), 'GET'): 39 # for FileResource 40 return resource.GET() 41 else: 42 # any BrowserView 43 return resource() 44 45 46 def write_js(context, folder, meta_bundle): 47 registry = getUtility(IRegistry) 48 resources = [] 49 50 # default resources 51 if meta_bundle == 'default' and registry.records.get( 52 'plone.resources/jquery.js' 53 ): 54 resources.append(get_resource(context, 55 registry.records['plone.resources/jquery.js'].value)) 56 resources.append(get_resource(context, 57 registry.records['plone.resources.requirejs'].value)) 58 resources.append(get_resource(context, 59 registry.records['plone.resources.configjs'].value)) 60 61 # bundles 62 bundles = registry.collectionOfInterface( 63 IBundleRegistry, prefix="plone.bundles", check=False) 64 for bundle in bundles.values(): 65 if bundle.merge_with == meta_bundle: 66 resources.append(get_resource(context, bundle.jscompilation)) 67 68 fi = StringIO() 69 for script in resources: 70 fi.write(script + '\n') 71 folder.writeFile(meta_bundle + ".js", fi) 72 73 74 def write_css(context, folder, meta_bundle): 75 registry = getUtility(IRegistry) 76 resources = [] 77 78 bundles = registry.collectionOfInterface( 79 IBundleRegistry, prefix="plone.bundles", check=False) 80 for bundle in bundles.values(): 81 if bundle.merge_with == meta_bundle: 82 resources.append(get_resource(context, bundle.csscompilation)) 83 84 fi = StringIO() 85 for script in resources: 86 fi.write(script + '\n') 87 folder.writeFile(meta_bundle + ".css", fi) 88 89 90 def combine_bundles(context): 91 persistent_directory = queryUtility(IResourceDirectory, name="persistent") 92 if persistent_directory is None: 93 return 94 if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory: 95 persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME) 96 container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME] 
97 if PRODUCTION_RESOURCE_DIRECTORY not in container: 98 container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY) 99 production_folder = container[PRODUCTION_RESOURCE_DIRECTORY] 100 101 # store timestamp 102 fi = StringIO() 103 fi.write(datetime.now().isoformat()) 104 production_folder.writeFile("timestamp.txt", fi) 105 106 # generate new combined bundles 107 write_js(context, production_folder, 'default') 108 write_js(context, production_folder, 'logged-in') 109 write_css(context, production_folder, 'default') 110 write_css(context, production_folder, 'logged-in') 111 [end of Products/CMFPlone/resources/browser/combine.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py --- a/Products/CMFPlone/resources/browser/combine.py +++ b/Products/CMFPlone/resources/browser/combine.py @@ -1,3 +1,4 @@ +import re from zExceptions import NotFound from Acquisition import aq_base from datetime import datetime @@ -79,7 +80,15 @@ IBundleRegistry, prefix="plone.bundles", check=False) for bundle in bundles.values(): if bundle.merge_with == meta_bundle: - resources.append(get_resource(context, bundle.csscompilation)) + css = get_resource(context, bundle.csscompilation) + # Preserve relative urls: + # we prefix with '../'' any url not starting with '/' + # or http: or data: + css = re.sub( + r"""(url\(['"]?(?!['"]?([a-z]+:|\/)))""", + r'\1../', + css) + resources.append(css) fi = StringIO() for script in resources:
{"golden_diff": "diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py\n--- a/Products/CMFPlone/resources/browser/combine.py\n+++ b/Products/CMFPlone/resources/browser/combine.py\n@@ -1,3 +1,4 @@\n+import re\n from zExceptions import NotFound\n from Acquisition import aq_base\n from datetime import datetime\n@@ -79,7 +80,15 @@\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle:\n- resources.append(get_resource(context, bundle.csscompilation))\n+ css = get_resource(context, bundle.csscompilation)\n+ # Preserve relative urls:\n+ # we prefix with '../'' any url not starting with '/'\n+ # or http: or data:\n+ css = re.sub(\n+ r\"\"\"(url\\(['\"]?(?!['\"]?([a-z]+:|\\/)))\"\"\",\n+ r'\\1../',\n+ css)\n+ resources.append(css)\n \n fi = StringIO()\n for script in resources:\n", "issue": "CSS bundles generation breaks background images relative urls\nThis is a bug related to PR #1300.\n\n", "before_files": [{"content": "from zExceptions import NotFound\nfrom Acquisition import aq_base\nfrom datetime import datetime\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.file import FilesystemFile\nfrom plone.resource.interfaces import IResourceDirectory\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom Products.CMFPlone.interfaces.resources import (\n OVERRIDE_RESOURCE_DIRECTORY_NAME,\n)\nfrom StringIO import StringIO\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\n\nPRODUCTION_RESOURCE_DIRECTORY = \"production\"\n\n\ndef get_production_resource_directory():\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return ''\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n try:\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n except NotFound:\n return \"%s/++unique++1\" % PRODUCTION_RESOURCE_DIRECTORY\n timestamp = production_folder.readFile('timestamp.txt')\n return \"%s/++unique++%s\" % (\n PRODUCTION_RESOURCE_DIRECTORY, timestamp)\n\n\ndef get_resource(context, path):\n resource = context.unrestrictedTraverse(path)\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n return context.unrestrictedTraverse(directory).readFile(filename)\n else:\n if hasattr(aq_base(resource), 'GET'):\n # for FileResource\n return resource.GET()\n else:\n # any BrowserView\n return resource()\n\n\ndef write_js(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n # default resources\n if meta_bundle == 'default' and registry.records.get(\n 'plone.resources/jquery.js'\n ):\n resources.append(get_resource(context,\n registry.records['plone.resources/jquery.js'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.requirejs'].value))\n resources.append(get_resource(context,\n registry.records['plone.resources.configjs'].value))\n\n # bundles\n bundles = registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle:\n resources.append(get_resource(context, bundle.jscompilation))\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".js\", fi)\n\n\ndef write_css(context, folder, meta_bundle):\n registry = getUtility(IRegistry)\n resources = []\n\n bundles = 
registry.collectionOfInterface(\n IBundleRegistry, prefix=\"plone.bundles\", check=False)\n for bundle in bundles.values():\n if bundle.merge_with == meta_bundle:\n resources.append(get_resource(context, bundle.csscompilation))\n\n fi = StringIO()\n for script in resources:\n fi.write(script + '\\n')\n folder.writeFile(meta_bundle + \".css\", fi)\n\n\ndef combine_bundles(context):\n persistent_directory = queryUtility(IResourceDirectory, name=\"persistent\")\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n if PRODUCTION_RESOURCE_DIRECTORY not in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n\n # store timestamp\n fi = StringIO()\n fi.write(datetime.now().isoformat())\n production_folder.writeFile(\"timestamp.txt\", fi)\n\n # generate new combined bundles\n write_js(context, production_folder, 'default')\n write_js(context, production_folder, 'logged-in')\n write_css(context, production_folder, 'default')\n write_css(context, production_folder, 'logged-in')\n", "path": "Products/CMFPlone/resources/browser/combine.py"}]}
1,585
251
gh_patches_debug_2033
rasdani/github-patches
git_diff
googleapis__python-bigquery-802
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ChunkedEncodingError is not retried when fetching data with list_rows() Original issue: https://github.com/googleapis/python-bigquery-storage/issues/242 A user reported that they saw an error in production when fetching table data with `Client.list_rows()`. That method uses the [default retry object](https://github.com/googleapis/python-bigquery/blob/7e0e2bafc4c3f98a4246100f504fd78a01a28e7d/google/cloud/bigquery/retry.py#L49), which currently does not consider `requests.exceptions.ChunkedEncodingError` retryable. (it does retry `requests.exceptions.ConnectionError`, but `ChunkedEncodingError` is not a subclass of that. </issue> <code> [start of google/cloud/bigquery/retry.py] 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from google.api_core import exceptions 16 from google.api_core import retry 17 from google.auth import exceptions as auth_exceptions 18 import requests.exceptions 19 20 21 _RETRYABLE_REASONS = frozenset( 22 ["rateLimitExceeded", "backendError", "internalError", "badGateway"] 23 ) 24 25 _UNSTRUCTURED_RETRYABLE_TYPES = ( 26 ConnectionError, 27 exceptions.TooManyRequests, 28 exceptions.InternalServerError, 29 exceptions.BadGateway, 30 requests.exceptions.ConnectionError, 31 auth_exceptions.TransportError, 32 ) 33 34 35 def _should_retry(exc): 36 """Predicate for determining when to retry. 37 38 We retry if and only if the 'reason' is 'backendError' 39 or 'rateLimitExceeded'. 40 """ 41 if not hasattr(exc, "errors") or len(exc.errors) == 0: 42 # Check for unstructured error returns, e.g. from GFE 43 return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES) 44 45 reason = exc.errors[0]["reason"] 46 return reason in _RETRYABLE_REASONS 47 48 49 DEFAULT_RETRY = retry.Retry(predicate=_should_retry) 50 """The default retry object. 51 52 Any method with a ``retry`` parameter will be retried automatically, 53 with reasonable defaults. To disable retry, pass ``retry=None``. 54 To modify the default retry behavior, call a ``with_XXX`` method 55 on ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds, 56 pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``. 57 """ 58 [end of google/cloud/bigquery/retry.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py --- a/google/cloud/bigquery/retry.py +++ b/google/cloud/bigquery/retry.py @@ -27,6 +27,7 @@ exceptions.TooManyRequests, exceptions.InternalServerError, exceptions.BadGateway, + requests.exceptions.ChunkedEncodingError, requests.exceptions.ConnectionError, auth_exceptions.TransportError, )
{"golden_diff": "diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py\n--- a/google/cloud/bigquery/retry.py\n+++ b/google/cloud/bigquery/retry.py\n@@ -27,6 +27,7 @@\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n+ requests.exceptions.ChunkedEncodingError,\n requests.exceptions.ConnectionError,\n auth_exceptions.TransportError,\n )\n", "issue": "ChunkedEncodingError is not retried when fetching data with list_rows()\nOriginal issue: https://github.com/googleapis/python-bigquery-storage/issues/242\r\n\r\nA user reported that they saw an error in production when fetching table data with `Client.list_rows()`. That method uses the [default retry object](https://github.com/googleapis/python-bigquery/blob/7e0e2bafc4c3f98a4246100f504fd78a01a28e7d/google/cloud/bigquery/retry.py#L49), which currently does not consider `requests.exceptions.ChunkedEncodingError` retryable.\r\n\r\n(it does retry `requests.exceptions.ConnectionError`, but `ChunkedEncodingError` is not a subclass of that.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nfrom google.auth import exceptions as auth_exceptions\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n requests.exceptions.ConnectionError,\n auth_exceptions.TransportError,\n)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py"}]}
1,261
92
gh_patches_debug_1899
rasdani/github-patches
git_diff
translate__translate-3683
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> setcontext is not working correctly for mounit Calling setcontext on mounit does currently nothing as it inherits code from base class: ``` python def setcontext(self, context): """Set the message context""" pass ``` I'd expect it to properly update context as it does for other storages. </issue> <code> [start of translate/storage/mo.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright 2007 Zuza Software Foundation 4 # 5 # the function "serialize" was derived from Python v2.4 6 # (Tools/i18n/msgfmt.py - function "generate"): 7 # Written by Martin v. Löwis <[email protected]> 8 # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation. 9 # All rights reserved. 10 # original license: Python Software Foundation (version 2) 11 # 12 # 13 # This file is part of translate. 14 # 15 # translate is free software; you can redistribute it and/or modify 16 # it under the terms of the GNU General Public License as published by 17 # the Free Software Foundation; either version 2 of the License, or 18 # (at your option) any later version. 19 # 20 # translate is distributed in the hope that it will be useful, 21 # but WITHOUT ANY WARRANTY; without even the implied warranty of 22 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 23 # GNU General Public License for more details. 24 # 25 # You should have received a copy of the GNU General Public License 26 # along with this program; if not, see <http://www.gnu.org/licenses/>. 27 # 28 29 """Module for parsing Gettext .mo files for translation. 30 31 The coding of .mo files was produced from `Gettext documentation 32 <http://www.gnu.org/software/gettext/manual/gettext.html#MO-Files>`_, 33 Pythons msgfmt.py and by observing and testing existing .mo files in the wild. 34 35 The hash algorithm is implemented for MO files, this should result in 36 faster access of the MO file. The hash is optional for Gettext 37 and is not needed for reading or writing MO files, in this implementation 38 it is always on and does produce sometimes different results to Gettext 39 in very small files. 
40 """ 41 42 import array 43 import re 44 import six 45 import struct 46 47 from translate.misc.multistring import multistring 48 from translate.storage import base, poheader 49 50 51 MO_MAGIC_NUMBER = 0x950412de 52 53 54 def mounpack(filename='messages.mo'): 55 """Helper to unpack Gettext MO files into a Python string""" 56 with open(filename, 'rb') as fh: 57 s = fh.read() 58 print("\\x%02x" * len(s) % tuple(map(ord, s))) 59 60 61 def my_swap4(result): 62 c0 = (result >> 0) & 0xff 63 c1 = (result >> 8) & 0xff 64 c2 = (result >> 16) & 0xff 65 c3 = (result >> 24) & 0xff 66 67 return (c0 << 24) | (c1 << 16) | (c2 << 8) | c3 68 69 70 def hashpjw(str_param): 71 HASHWORDBITS = 32 72 hval = 0 73 g = None 74 s = str_param 75 for s in str_param: 76 hval = hval << 4 77 hval += ord(s) if six.PY2 else s 78 g = hval & 0xf << (HASHWORDBITS - 4) 79 if (g != 0): 80 hval = hval ^ g >> (HASHWORDBITS - 8) 81 hval = hval ^ g 82 return hval 83 84 85 def get_next_prime_number(start): 86 # find the smallest prime number that is greater or equal "start" 87 88 def is_prime(num): 89 # special small numbers 90 if (num < 2) or (num == 4): 91 return False 92 if (num == 2) or (num == 3): 93 return True 94 # check for numbers > 4 95 for divider in range(2, num // 2): 96 if num % divider == 0: 97 return False 98 return True 99 100 candidate = start 101 while not is_prime(candidate): 102 candidate += 1 103 return candidate 104 105 106 class mounit(base.TranslationUnit): 107 """A class representing a .mo translation message.""" 108 109 def __init__(self, source=None, **kwargs): 110 self.msgctxt = [] 111 self.msgidcomments = [] 112 super(mounit, self).__init__(source) 113 114 def getcontext(self): 115 """Get the message context""" 116 # Still need to handle KDE comments 117 if self.msgctxt is None: 118 return None 119 return "".join(self.msgctxt) 120 121 def isheader(self): 122 """Is this a header entry?""" 123 return self.source == u"" 124 125 def istranslatable(self): 126 """Is this message translateable?""" 127 return bool(self.source) 128 129 130 class mofile(poheader.poheader, base.TranslationStore): 131 """A class representing a .mo file.""" 132 133 UnitClass = mounit 134 Name = "Gettext MO file" 135 Mimetypes = ["application/x-gettext-catalog", "application/x-mo"] 136 Extensions = ["mo", "gmo"] 137 _binary = True 138 139 def __init__(self, inputfile=None, **kwargs): 140 super(mofile, self).__init__(**kwargs) 141 self.filename = '' 142 if inputfile is not None: 143 self.parsestring(inputfile) 144 145 def serialize(self, out): 146 """Output a string representation of the MO data file""" 147 # check the header of this file for the copyright note of this function 148 149 def add_to_hash_table(string, i): 150 V = hashpjw(string) 151 # Taken from gettext-0.17:gettext-tools/src/write-mo.c:408-409 152 S = hash_size <= 2 and 3 or hash_size 153 hash_cursor = V % S 154 orig_hash_cursor = hash_cursor 155 increment = 1 + (V % (S - 2)) 156 while True: 157 index = hash_table[hash_cursor] 158 if (index == 0): 159 hash_table[hash_cursor] = i + 1 160 break 161 hash_cursor += increment 162 hash_cursor = hash_cursor % S 163 assert (hash_cursor != orig_hash_cursor) 164 165 def lst_encode(lst, join_char=b''): 166 return join_char.join([i.encode('utf-8') for i in lst]) 167 168 # hash_size should be the smallest prime number that is greater 169 # or equal (4 / 3 * N) - where N is the number of keys/units. 
170 # see gettext-0.17:gettext-tools/src/write-mo.c:406 171 hash_size = get_next_prime_number(int((len(self.units) * 4) / 3)) 172 if hash_size <= 2: 173 hash_size = 3 174 MESSAGES = {} 175 for unit in self.units: 176 # If the unit is not translated, we should rather omit it entirely 177 if not unit.istranslated(): 178 continue 179 if isinstance(unit.source, multistring): 180 source = (lst_encode(unit.msgidcomments) + 181 lst_encode(unit.source.strings, b"\0")) 182 else: 183 source = lst_encode(unit.msgidcomments) + unit.source.encode('utf-8') 184 if unit.msgctxt: 185 source = lst_encode(unit.msgctxt) + b"\x04" + source 186 if isinstance(unit.target, multistring): 187 target = lst_encode(unit.target.strings, b"\0") 188 else: 189 target = unit.target.encode('utf-8') 190 if unit.target: 191 MESSAGES[source] = target 192 # using "I" works for 32- and 64-bit systems, but not for 16-bit! 193 hash_table = array.array("I", [0] * hash_size) 194 # the keys are sorted in the .mo file 195 keys = sorted(MESSAGES.keys()) 196 offsets = [] 197 ids = strs = b'' 198 for i, id in enumerate(keys): 199 # For each string, we need size and file offset. Each string is 200 # NUL terminated; the NUL does not count into the size. 201 # TODO: We don't do any encoding detection from the PO Header 202 add_to_hash_table(id, i) 203 string = MESSAGES[id] # id already encoded for use as dictionary key 204 offsets.append((len(ids), len(id), len(strs), len(string))) 205 ids = ids + id + b'\0' 206 strs = strs + string + b'\0' 207 output = '' 208 # The header is 7 32-bit unsigned integers 209 keystart = 7 * 4 + 16 * len(keys) + hash_size * 4 210 # and the values start after the keys 211 valuestart = keystart + len(ids) 212 koffsets = [] 213 voffsets = [] 214 # The string table first has the list of keys, then the list of values. 215 # Each entry has first the size of the string, then the file offset. 
216 for o1, l1, o2, l2 in offsets: 217 koffsets = koffsets + [l1, o1 + keystart] 218 voffsets = voffsets + [l2, o2 + valuestart] 219 offsets = koffsets + voffsets 220 output = struct.pack("Iiiiiii", 221 MO_MAGIC_NUMBER, # Magic 222 0, # Version 223 len(keys), # # of entries 224 7 * 4, # start of key index 225 7 * 4 + len(keys) * 8, # start of value index 226 hash_size, # size of hash table 227 7 * 4 + 2 * (len(keys) * 8)) # offset of hash table 228 # additional data is not necessary for empty mo files 229 if (len(keys) > 0): 230 output = output + array.array("i", offsets).tostring() 231 output = output + hash_table.tostring() 232 output = output + ids 233 output = output + strs 234 return out.write(output) 235 236 def parse(self, input): 237 """parses the given file or file source string""" 238 if hasattr(input, 'name'): 239 self.filename = input.name 240 elif not getattr(self, 'filename', ''): 241 self.filename = '' 242 if hasattr(input, "read"): 243 mosrc = input.read() 244 input.close() 245 input = mosrc 246 little, = struct.unpack("<L", input[:4]) 247 big, = struct.unpack(">L", input[:4]) 248 if little == MO_MAGIC_NUMBER: 249 endian = "<" 250 elif big == MO_MAGIC_NUMBER: 251 endian = ">" 252 else: 253 raise ValueError("This is not an MO file") 254 magic, version_maj, version_min, lenkeys, startkey, \ 255 startvalue, sizehash, offsethash = struct.unpack("%sLHHiiiii" % endian, 256 input[:(7 * 4)]) 257 if version_maj >= 1: 258 raise base.ParseError("""Unable to process version %d.%d MO files""" % (version_maj, version_min)) 259 for i in range(lenkeys): 260 nextkey = startkey + (i * 2 * 4) 261 nextvalue = startvalue + (i * 2 * 4) 262 klength, koffset = struct.unpack("%sii" % endian, 263 input[nextkey:nextkey + (2 * 4)]) 264 vlength, voffset = struct.unpack("%sii" % endian, 265 input[nextvalue:nextvalue + (2 * 4)]) 266 source = input[koffset:koffset + klength] 267 context = None 268 if b"\x04" in source: 269 context, source = source.split(b"\x04") 270 # Still need to handle KDE comments 271 if source == "": 272 charset = re.search(b"charset=([^\\s]+)", 273 input[voffset:voffset + vlength]) 274 if charset: 275 self.encoding = charset.group(1) 276 source = multistring([s.decode(self.encoding) 277 for s in source.split(b"\0")]) 278 target = multistring([s.decode(self.encoding) 279 for s in input[voffset:voffset + vlength].split(b"\0")]) 280 newunit = mounit(source) 281 newunit.target = target 282 if context is not None: 283 newunit.msgctxt.append(context) 284 self.addunit(newunit) 285 [end of translate/storage/mo.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/translate/storage/mo.py b/translate/storage/mo.py --- a/translate/storage/mo.py +++ b/translate/storage/mo.py @@ -118,6 +118,9 @@ return None return "".join(self.msgctxt) + def setcontext(self, context): + self.msgctxt = [context] + def isheader(self): """Is this a header entry?""" return self.source == u""
{"golden_diff": "diff --git a/translate/storage/mo.py b/translate/storage/mo.py\n--- a/translate/storage/mo.py\n+++ b/translate/storage/mo.py\n@@ -118,6 +118,9 @@\n return None\n return \"\".join(self.msgctxt)\n \n+ def setcontext(self, context):\n+ self.msgctxt = [context]\n+\n def isheader(self):\n \"\"\"Is this a header entry?\"\"\"\n return self.source == u\"\"\n", "issue": "setcontext is not working correctly for mounit\nCalling setcontext on mounit does currently nothing as it inherits code from base class:\n\n``` python\n def setcontext(self, context):\n \"\"\"Set the message context\"\"\"\n pass\n```\n\nI'd expect it to properly update context as it does for other storages.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2007 Zuza Software Foundation\n#\n# the function \"serialize\" was derived from Python v2.4\n# (Tools/i18n/msgfmt.py - function \"generate\"):\n# Written by Martin v. L\u00f6wis <[email protected]>\n# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.\n# All rights reserved.\n# original license: Python Software Foundation (version 2)\n#\n#\n# This file is part of translate.\n#\n# translate is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# translate is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n#\n\n\"\"\"Module for parsing Gettext .mo files for translation.\n\nThe coding of .mo files was produced from `Gettext documentation\n<http://www.gnu.org/software/gettext/manual/gettext.html#MO-Files>`_,\nPythons msgfmt.py and by observing and testing existing .mo files in the wild.\n\nThe hash algorithm is implemented for MO files, this should result in\nfaster access of the MO file. 
The hash is optional for Gettext\nand is not needed for reading or writing MO files, in this implementation\nit is always on and does produce sometimes different results to Gettext\nin very small files.\n\"\"\"\n\nimport array\nimport re\nimport six\nimport struct\n\nfrom translate.misc.multistring import multistring\nfrom translate.storage import base, poheader\n\n\nMO_MAGIC_NUMBER = 0x950412de\n\n\ndef mounpack(filename='messages.mo'):\n \"\"\"Helper to unpack Gettext MO files into a Python string\"\"\"\n with open(filename, 'rb') as fh:\n s = fh.read()\n print(\"\\\\x%02x\" * len(s) % tuple(map(ord, s)))\n\n\ndef my_swap4(result):\n c0 = (result >> 0) & 0xff\n c1 = (result >> 8) & 0xff\n c2 = (result >> 16) & 0xff\n c3 = (result >> 24) & 0xff\n\n return (c0 << 24) | (c1 << 16) | (c2 << 8) | c3\n\n\ndef hashpjw(str_param):\n HASHWORDBITS = 32\n hval = 0\n g = None\n s = str_param\n for s in str_param:\n hval = hval << 4\n hval += ord(s) if six.PY2 else s\n g = hval & 0xf << (HASHWORDBITS - 4)\n if (g != 0):\n hval = hval ^ g >> (HASHWORDBITS - 8)\n hval = hval ^ g\n return hval\n\n\ndef get_next_prime_number(start):\n # find the smallest prime number that is greater or equal \"start\"\n\n def is_prime(num):\n # special small numbers\n if (num < 2) or (num == 4):\n return False\n if (num == 2) or (num == 3):\n return True\n # check for numbers > 4\n for divider in range(2, num // 2):\n if num % divider == 0:\n return False\n return True\n\n candidate = start\n while not is_prime(candidate):\n candidate += 1\n return candidate\n\n\nclass mounit(base.TranslationUnit):\n \"\"\"A class representing a .mo translation message.\"\"\"\n\n def __init__(self, source=None, **kwargs):\n self.msgctxt = []\n self.msgidcomments = []\n super(mounit, self).__init__(source)\n\n def getcontext(self):\n \"\"\"Get the message context\"\"\"\n # Still need to handle KDE comments\n if self.msgctxt is None:\n return None\n return \"\".join(self.msgctxt)\n\n def isheader(self):\n \"\"\"Is this a header entry?\"\"\"\n return self.source == u\"\"\n\n def istranslatable(self):\n \"\"\"Is this message translateable?\"\"\"\n return bool(self.source)\n\n\nclass mofile(poheader.poheader, base.TranslationStore):\n \"\"\"A class representing a .mo file.\"\"\"\n\n UnitClass = mounit\n Name = \"Gettext MO file\"\n Mimetypes = [\"application/x-gettext-catalog\", \"application/x-mo\"]\n Extensions = [\"mo\", \"gmo\"]\n _binary = True\n\n def __init__(self, inputfile=None, **kwargs):\n super(mofile, self).__init__(**kwargs)\n self.filename = ''\n if inputfile is not None:\n self.parsestring(inputfile)\n\n def serialize(self, out):\n \"\"\"Output a string representation of the MO data file\"\"\"\n # check the header of this file for the copyright note of this function\n\n def add_to_hash_table(string, i):\n V = hashpjw(string)\n # Taken from gettext-0.17:gettext-tools/src/write-mo.c:408-409\n S = hash_size <= 2 and 3 or hash_size\n hash_cursor = V % S\n orig_hash_cursor = hash_cursor\n increment = 1 + (V % (S - 2))\n while True:\n index = hash_table[hash_cursor]\n if (index == 0):\n hash_table[hash_cursor] = i + 1\n break\n hash_cursor += increment\n hash_cursor = hash_cursor % S\n assert (hash_cursor != orig_hash_cursor)\n\n def lst_encode(lst, join_char=b''):\n return join_char.join([i.encode('utf-8') for i in lst])\n\n # hash_size should be the smallest prime number that is greater\n # or equal (4 / 3 * N) - where N is the number of keys/units.\n # see gettext-0.17:gettext-tools/src/write-mo.c:406\n hash_size = 
get_next_prime_number(int((len(self.units) * 4) / 3))\n if hash_size <= 2:\n hash_size = 3\n MESSAGES = {}\n for unit in self.units:\n # If the unit is not translated, we should rather omit it entirely\n if not unit.istranslated():\n continue\n if isinstance(unit.source, multistring):\n source = (lst_encode(unit.msgidcomments) +\n lst_encode(unit.source.strings, b\"\\0\"))\n else:\n source = lst_encode(unit.msgidcomments) + unit.source.encode('utf-8')\n if unit.msgctxt:\n source = lst_encode(unit.msgctxt) + b\"\\x04\" + source\n if isinstance(unit.target, multistring):\n target = lst_encode(unit.target.strings, b\"\\0\")\n else:\n target = unit.target.encode('utf-8')\n if unit.target:\n MESSAGES[source] = target\n # using \"I\" works for 32- and 64-bit systems, but not for 16-bit!\n hash_table = array.array(\"I\", [0] * hash_size)\n # the keys are sorted in the .mo file\n keys = sorted(MESSAGES.keys())\n offsets = []\n ids = strs = b''\n for i, id in enumerate(keys):\n # For each string, we need size and file offset. Each string is\n # NUL terminated; the NUL does not count into the size.\n # TODO: We don't do any encoding detection from the PO Header\n add_to_hash_table(id, i)\n string = MESSAGES[id] # id already encoded for use as dictionary key\n offsets.append((len(ids), len(id), len(strs), len(string)))\n ids = ids + id + b'\\0'\n strs = strs + string + b'\\0'\n output = ''\n # The header is 7 32-bit unsigned integers\n keystart = 7 * 4 + 16 * len(keys) + hash_size * 4\n # and the values start after the keys\n valuestart = keystart + len(ids)\n koffsets = []\n voffsets = []\n # The string table first has the list of keys, then the list of values.\n # Each entry has first the size of the string, then the file offset.\n for o1, l1, o2, l2 in offsets:\n koffsets = koffsets + [l1, o1 + keystart]\n voffsets = voffsets + [l2, o2 + valuestart]\n offsets = koffsets + voffsets\n output = struct.pack(\"Iiiiiii\",\n MO_MAGIC_NUMBER, # Magic\n 0, # Version\n len(keys), # # of entries\n 7 * 4, # start of key index\n 7 * 4 + len(keys) * 8, # start of value index\n hash_size, # size of hash table\n 7 * 4 + 2 * (len(keys) * 8)) # offset of hash table\n # additional data is not necessary for empty mo files\n if (len(keys) > 0):\n output = output + array.array(\"i\", offsets).tostring()\n output = output + hash_table.tostring()\n output = output + ids\n output = output + strs\n return out.write(output)\n\n def parse(self, input):\n \"\"\"parses the given file or file source string\"\"\"\n if hasattr(input, 'name'):\n self.filename = input.name\n elif not getattr(self, 'filename', ''):\n self.filename = ''\n if hasattr(input, \"read\"):\n mosrc = input.read()\n input.close()\n input = mosrc\n little, = struct.unpack(\"<L\", input[:4])\n big, = struct.unpack(\">L\", input[:4])\n if little == MO_MAGIC_NUMBER:\n endian = \"<\"\n elif big == MO_MAGIC_NUMBER:\n endian = \">\"\n else:\n raise ValueError(\"This is not an MO file\")\n magic, version_maj, version_min, lenkeys, startkey, \\\n startvalue, sizehash, offsethash = struct.unpack(\"%sLHHiiiii\" % endian,\n input[:(7 * 4)])\n if version_maj >= 1:\n raise base.ParseError(\"\"\"Unable to process version %d.%d MO files\"\"\" % (version_maj, version_min))\n for i in range(lenkeys):\n nextkey = startkey + (i * 2 * 4)\n nextvalue = startvalue + (i * 2 * 4)\n klength, koffset = struct.unpack(\"%sii\" % endian,\n input[nextkey:nextkey + (2 * 4)])\n vlength, voffset = struct.unpack(\"%sii\" % endian,\n input[nextvalue:nextvalue + (2 * 4)])\n source = 
input[koffset:koffset + klength]\n context = None\n if b\"\\x04\" in source:\n context, source = source.split(b\"\\x04\")\n # Still need to handle KDE comments\n if source == \"\":\n charset = re.search(b\"charset=([^\\\\s]+)\",\n input[voffset:voffset + vlength])\n if charset:\n self.encoding = charset.group(1)\n source = multistring([s.decode(self.encoding)\n for s in source.split(b\"\\0\")])\n target = multistring([s.decode(self.encoding)\n for s in input[voffset:voffset + vlength].split(b\"\\0\")])\n newunit = mounit(source)\n newunit.target = target\n if context is not None:\n newunit.msgctxt.append(context)\n self.addunit(newunit)\n", "path": "translate/storage/mo.py"}]}
4,065
104
gh_patches_debug_5179
rasdani/github-patches
git_diff
lutris__lutris-2653
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Impossible login to GOG.com: invalid cookie ? **Describe the bug** I can't connect Lutris with my GOG.com account: the GOG.com credentials window is here, I can fill it, but when I confirm this form, the window closes and… nothing more. My account isn't connected, and if I close the "Import games" window, I can't reopen it unless I delete '.cache/lutris' directory. **Expected behavior** I should have a second GOG.com form asking for a PIN code, then my account should be connected in Lutris. **Current behavior** As in description above. Plus technical details: * two files are created in '~/.cache/lutris/': .gog.auth and .gog.token (attached as [gog.auth.txt](https://github.com/lutris/lutris/files/4309081/gog.auth.txt) and [gog.token.txt](https://github.com/lutris/lutris/files/4309083/gog.token.txt)) * according to standard output in terminal, some problems with Python, ending with an invalid cookie? (see [lutris.txt](https://github.com/lutris/lutris/files/4309117/lutris.txt)) **Steps to reproduce** It happens while importing GOG games from the app or installing GOG games from the website. **System information** Fresh Lutris install, Arch Linux, kernel 5.5.8. More in [lutris.log](https://github.com/lutris/lutris/files/4309125/lutris.log) </issue> <code> [start of lutris/util/cookies.py] 1 import time 2 from http.cookiejar import MozillaCookieJar, Cookie, _warn_unhandled_exception 3 4 5 class WebkitCookieJar(MozillaCookieJar): 6 """Subclass of MozillaCookieJar for compatibility with cookies 7 coming from Webkit2. 8 This disables the magic_re header which is not present and adds 9 compatibility with HttpOnly cookies (See http://bugs.python.org/issue2190) 10 """ 11 12 def _really_load(self, f, filename, ignore_discard, ignore_expires): 13 now = time.time() 14 try: 15 while 1: 16 line = f.readline() 17 if line == "": 18 break 19 20 # last field may be absent, so keep any trailing tab 21 if line.endswith("\n"): 22 line = line[:-1] 23 24 sline = line.strip() 25 # support HttpOnly cookies (as stored by curl or old Firefox). 26 if sline.startswith("#HttpOnly_"): 27 line = sline[10:] 28 elif sline.startswith("#") or sline == "": 29 continue 30 31 domain, domain_specified, path, secure, expires, name, value = line.split( 32 "\t" 33 ) 34 secure = secure == "TRUE" 35 domain_specified = domain_specified == "TRUE" 36 if name == "": 37 # cookies.txt regards 'Set-Cookie: foo' as a cookie 38 # with no name, whereas http.cookiejar regards it as a 39 # cookie with no value. 40 name = value 41 value = None 42 43 initial_dot = domain.startswith(".") 44 assert domain_specified == initial_dot 45 46 discard = False 47 if expires == "": 48 expires = None 49 discard = True 50 51 # assume path_specified is false 52 c = Cookie( 53 0, 54 name, 55 value, 56 None, 57 False, 58 domain, 59 domain_specified, 60 initial_dot, 61 path, 62 False, 63 secure, 64 expires, 65 discard, 66 None, 67 None, 68 {}, 69 ) 70 if not ignore_discard and c.discard: 71 continue 72 if not ignore_expires and c.is_expired(now): 73 continue 74 self.set_cookie(c) 75 76 except OSError: 77 raise 78 except Exception: 79 _warn_unhandled_exception() 80 raise OSError( 81 "invalid Netscape format cookies file %r: %r" % (filename, line) 82 ) 83 [end of lutris/util/cookies.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lutris/util/cookies.py b/lutris/util/cookies.py --- a/lutris/util/cookies.py +++ b/lutris/util/cookies.py @@ -28,7 +28,7 @@ elif sline.startswith("#") or sline == "": continue - domain, domain_specified, path, secure, expires, name, value = line.split( + domain, domain_specified, path, secure, expires, name, value, aditional_info = line.split( "\t" ) secure = secure == "TRUE"
{"golden_diff": "diff --git a/lutris/util/cookies.py b/lutris/util/cookies.py\n--- a/lutris/util/cookies.py\n+++ b/lutris/util/cookies.py\n@@ -28,7 +28,7 @@\n elif sline.startswith(\"#\") or sline == \"\":\n continue\n \n- domain, domain_specified, path, secure, expires, name, value = line.split(\n+ domain, domain_specified, path, secure, expires, name, value, aditional_info = line.split(\n \"\\t\"\n )\n secure = secure == \"TRUE\"\n", "issue": "Impossible login to GOG.com: invalid cookie ?\n**Describe the bug**\r\n\r\nI can't connect Lutris with my GOG.com account: the GOG.com credentials window is here, I can fill it, but when I confirm this form, the window closes and\u2026 nothing more. My account isn't connected, and if I close the \"Import games\" window, I can't reopen it unless I delete '.cache/lutris' directory. \r\n\r\n**Expected behavior**\r\n\r\nI should have a second GOG.com form asking for a PIN code, then my account should be connected in Lutris.\r\n\r\n**Current behavior**\r\n\r\nAs in description above. Plus technical details:\r\n\r\n* two files are created in '~/.cache/lutris/': .gog.auth and .gog.token (attached as [gog.auth.txt](https://github.com/lutris/lutris/files/4309081/gog.auth.txt) and [gog.token.txt](https://github.com/lutris/lutris/files/4309083/gog.token.txt))\r\n* according to standard output in terminal, some problems with Python, ending with an invalid cookie? (see [lutris.txt](https://github.com/lutris/lutris/files/4309117/lutris.txt))\r\n\r\n**Steps to reproduce**\r\n\r\nIt happens while importing GOG games from the app or installing GOG games from the website.\r\n\r\n**System information**\r\n\r\nFresh Lutris install, Arch Linux, kernel 5.5.8. More in [lutris.log](https://github.com/lutris/lutris/files/4309125/lutris.log)\n", "before_files": [{"content": "import time\nfrom http.cookiejar import MozillaCookieJar, Cookie, _warn_unhandled_exception\n\n\nclass WebkitCookieJar(MozillaCookieJar):\n \"\"\"Subclass of MozillaCookieJar for compatibility with cookies\n coming from Webkit2.\n This disables the magic_re header which is not present and adds\n compatibility with HttpOnly cookies (See http://bugs.python.org/issue2190)\n \"\"\"\n\n def _really_load(self, f, filename, ignore_discard, ignore_expires):\n now = time.time()\n try:\n while 1:\n line = f.readline()\n if line == \"\":\n break\n\n # last field may be absent, so keep any trailing tab\n if line.endswith(\"\\n\"):\n line = line[:-1]\n\n sline = line.strip()\n # support HttpOnly cookies (as stored by curl or old Firefox).\n if sline.startswith(\"#HttpOnly_\"):\n line = sline[10:]\n elif sline.startswith(\"#\") or sline == \"\":\n continue\n\n domain, domain_specified, path, secure, expires, name, value = line.split(\n \"\\t\"\n )\n secure = secure == \"TRUE\"\n domain_specified = domain_specified == \"TRUE\"\n if name == \"\":\n # cookies.txt regards 'Set-Cookie: foo' as a cookie\n # with no name, whereas http.cookiejar regards it as a\n # cookie with no value.\n name = value\n value = None\n\n initial_dot = domain.startswith(\".\")\n assert domain_specified == initial_dot\n\n discard = False\n if expires == \"\":\n expires = None\n discard = True\n\n # assume path_specified is false\n c = Cookie(\n 0,\n name,\n value,\n None,\n False,\n domain,\n domain_specified,\n initial_dot,\n path,\n False,\n secure,\n expires,\n discard,\n None,\n None,\n {},\n )\n if not ignore_discard and c.discard:\n continue\n if not ignore_expires and c.is_expired(now):\n continue\n self.set_cookie(c)\n\n except 
OSError:\n raise\n except Exception:\n _warn_unhandled_exception()\n raise OSError(\n \"invalid Netscape format cookies file %r: %r\" % (filename, line)\n )\n", "path": "lutris/util/cookies.py"}]}
1,557
127
gh_patches_debug_25826
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-1965
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> We need a way to specify Preproc function for base_grouping_column ## Description In partial summarization, when the `grouping_expressions` also include the `base_grouping_column`, "guessing" does not work. * We need a place to specify `preproc` functions for the `base_grouping_column`. We should either allow additionally specifying it within `grouping_expressions` or extend `base_grouping_column` to include `preproc`. * If we do not allow it within `grouping_expressions`, the "guessed" response should not include it within `grouping_expressions`. Currently, it is also included. cc: @dmos62 @mathemancer </issue> <code> [start of db/transforms/operations/finish_specifying.py] 1 from db.tables.operations import select as tables_select 2 from db.tables.operations.select import get_joinable_tables 3 from db.transforms.base import Summarize, SelectSubsetOfColumns 4 from db.columns.operations.select import get_column_from_oid_and_attnum 5 6 7 def finish_specifying_summarize_transform( 8 db_query, ix_of_summarize_transform, engine, metadata 9 ): 10 """ 11 Adds missing input aliases to the summarize transform. 12 13 Will find input aliases that are not mentioned in the summarize_transform and will add each 14 of them to its group-by set and/or aggregate-on set. 15 16 If the user selected input alias (summarize's base grouping column) is not unique-constrained, 17 will put the unmentioned input aliases in the aggregation set. 18 19 If the user selected input alias (summarize's base grouping column) is unique-constrained, 20 then it might put at least some input aliases in the grouping set, depending on what 21 _should_group_by returns. 22 """ 23 summarize_transform = db_query.transformations[ix_of_summarize_transform] 24 assert type(summarize_transform) is Summarize 25 aliases_to_be_added_to_group_by, aliases_to_be_added_to_agg_on = \ 26 _split_missing_input_aliases_into_group_and_agg_lists( 27 db_query, ix_of_summarize_transform, engine, metadata 28 ) 29 there_is_something_to_add = \ 30 aliases_to_be_added_to_group_by or aliases_to_be_added_to_group_by 31 if there_is_something_to_add: 32 summarize_transform = \ 33 summarize_transform.get_new_with_aliases_added_to_agg_on( 34 aliases_to_be_added_to_agg_on 35 ) 36 summarize_transform = \ 37 summarize_transform.get_new_with_aliases_added_to_group_by( 38 aliases_to_be_added_to_group_by 39 ) 40 return summarize_transform 41 42 43 def _split_missing_input_aliases_into_group_and_agg_lists( 44 db_query, ix_of_summarize_transform, engine, metadata, 45 ): 46 aliases_to_be_added_to_group_by = [] 47 aliases_to_be_added_to_agg_on = [] 48 summarize_transform = db_query.transformations[ix_of_summarize_transform] 49 initial_columns_not_in_summarize = \ 50 _get_initial_columns_not_in_summarize(db_query, summarize_transform) 51 if not initial_columns_not_in_summarize: 52 # If all input aliases for summarize transform are in the transform's group-by or 53 # aggregate-on sets, there's nothing to do. 54 return aliases_to_be_added_to_group_by, aliases_to_be_added_to_agg_on 55 # A summarize transform has a base_grouping_column (which is an alias) around 56 # which our suggestions will be based. 
57 base_grouping_alias = summarize_transform.base_grouping_column 58 base_grouping_initial_column = \ 59 _get_initial_column_by_alias( 60 db_query.initial_columns, base_grouping_alias 61 ) 62 # We'll always want user-selected alias (base_grouping_column) in the "group-by set"; 63 if base_grouping_initial_column in initial_columns_not_in_summarize: 64 aliases_to_be_added_to_group_by.append(base_grouping_alias) 65 initial_columns_not_in_summarize.remove(base_grouping_initial_column) 66 # Most of logic in the rest of method is around whether or not we can add some of the other 67 # input aliases to the "group-by set"; otherwise we'll put them in "aggregate-on set". 68 can_add_other_aliases_to_group_by = ( 69 _is_first_alias_generating_transform( 70 db_query, ix_of_summarize_transform 71 ) 72 and _is_initial_column_unique_constrained( 73 base_grouping_initial_column, engine, metadata 74 ) 75 ) 76 if can_add_other_aliases_to_group_by: 77 oids_of_joinable_tables_with_single_results = \ 78 _get_oids_of_joinable_tables_with_single_results( 79 db_query, engine, metadata 80 ) 81 oid_of_base_grouping_initial_column = \ 82 _get_oid_of_initial_column(base_grouping_initial_column) 83 for initial_column in initial_columns_not_in_summarize: 84 if _should_group_by( 85 _get_oid_of_initial_column(initial_column), 86 oid_of_base_grouping_initial_column, 87 oids_of_joinable_tables_with_single_results, 88 ): 89 alias_set_to_add_to = aliases_to_be_added_to_group_by 90 else: 91 alias_set_to_add_to = aliases_to_be_added_to_agg_on 92 alias_set_to_add_to.append(initial_column.alias) 93 else: 94 aliases_to_be_added_to_agg_on = list( 95 initial_column.alias 96 for initial_column 97 in initial_columns_not_in_summarize 98 ) 99 return aliases_to_be_added_to_group_by, aliases_to_be_added_to_agg_on 100 101 102 def _is_first_alias_generating_transform(db_query, ix_of_summarize_transform): 103 """ 104 Checks if the transform is the first alias-generating transform. An alias-generating transform 105 means that it itroduces new aliases (columns) to the transform pipeline. We want to know when 106 a given alias-generating transform is the first in the pipeline, because then we can consider 107 its input aliases to be fully described by initial columns, which can be a useful 108 simplification. 
109 """ 110 prior_transforms = db_query.transformations[:ix_of_summarize_transform] 111 for prior_transform in prior_transforms: 112 alias_generating_transforms = {Summarize, SelectSubsetOfColumns} 113 is_alias_generating = \ 114 type(prior_transform) in alias_generating_transforms 115 if is_alias_generating: 116 return False 117 return True 118 119 120 def _get_initial_columns_not_in_summarize(db_query, summarize_transform): 121 initial_columns = db_query.initial_columns 122 group_by_aliases = summarize_transform.grouping_input_aliases 123 agg_on_aliases = summarize_transform.aggregation_input_aliases 124 aliases_in_summarize = group_by_aliases + agg_on_aliases 125 return [ 126 initial_column 127 for initial_column in 128 initial_columns 129 if initial_column.alias not in aliases_in_summarize 130 ] 131 132 133 def _get_initial_column_by_alias(initial_columns, alias): 134 for initial_column in initial_columns: 135 if initial_column.alias == alias: 136 return initial_column 137 138 139 def _should_group_by( 140 oid_of_initial_column, 141 oid_of_base_grouping_group_by_col, 142 oids_of_joinable_tables_with_single_results, 143 ): 144 """ 145 For the sake of efficiency, we're not checking here that base_grouping_group_by_col is unique 146 constrained: it is presumed that that is the case. 147 """ 148 is_on_table_of_base_grouping_column = \ 149 oid_of_initial_column == oid_of_base_grouping_group_by_col 150 is_single_result = \ 151 oid_of_initial_column in oids_of_joinable_tables_with_single_results 152 should_group_by = \ 153 is_on_table_of_base_grouping_column or is_single_result 154 return should_group_by 155 156 157 def _get_oids_of_joinable_tables_with_single_results( 158 db_query, engine, metadata, 159 ): 160 joinable_tables = \ 161 get_joinable_tables(engine, metadata, db_query.base_table_oid) 162 return set( 163 _get_oid_of_joinable_table(joinable_table) 164 for joinable_table 165 in joinable_tables 166 if _has_single_result(joinable_table) 167 ) 168 169 170 def _is_initial_column_unique_constrained(initial_column, engine, metadata): 171 oid = _get_oid_of_initial_column(initial_column) 172 attnum = initial_column.attnum 173 sa_column = get_column_from_oid_and_attnum( 174 table_oid=oid, 175 attnum=attnum, 176 engine=engine, 177 metadata=metadata, 178 ) 179 return _is_sa_column_unique_constrained(sa_column) 180 181 182 def _is_sa_column_unique_constrained(sa_column): 183 return sa_column.primary_key or sa_column.unique 184 185 186 def _get_oid_of_initial_column(initial_column): 187 return initial_column.reloid 188 189 190 def _get_oid_of_joinable_table(joinable_table): 191 joinable_table_oid = joinable_table[tables_select.TARGET] 192 return joinable_table_oid 193 194 195 def _has_single_result(joinable_table): 196 has_multiple_results = joinable_table[tables_select.MULTIPLE_RESULTS] 197 assert type(has_multiple_results) is bool 198 return not has_multiple_results 199 [end of db/transforms/operations/finish_specifying.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/db/transforms/operations/finish_specifying.py b/db/transforms/operations/finish_specifying.py --- a/db/transforms/operations/finish_specifying.py +++ b/db/transforms/operations/finish_specifying.py @@ -26,17 +26,16 @@ _split_missing_input_aliases_into_group_and_agg_lists( db_query, ix_of_summarize_transform, engine, metadata ) - there_is_something_to_add = \ - aliases_to_be_added_to_group_by or aliases_to_be_added_to_group_by - if there_is_something_to_add: - summarize_transform = \ - summarize_transform.get_new_with_aliases_added_to_agg_on( - aliases_to_be_added_to_agg_on - ) + if aliases_to_be_added_to_group_by: summarize_transform = \ summarize_transform.get_new_with_aliases_added_to_group_by( aliases_to_be_added_to_group_by ) + if aliases_to_be_added_to_agg_on: + summarize_transform = \ + summarize_transform.get_new_with_aliases_added_to_agg_on( + aliases_to_be_added_to_agg_on + ) return summarize_transform @@ -180,7 +179,10 @@ def _is_sa_column_unique_constrained(sa_column): - return sa_column.primary_key or sa_column.unique + return bool( + sa_column.primary_key + or sa_column.unique + ) def _get_oid_of_initial_column(initial_column):
{"golden_diff": "diff --git a/db/transforms/operations/finish_specifying.py b/db/transforms/operations/finish_specifying.py\n--- a/db/transforms/operations/finish_specifying.py\n+++ b/db/transforms/operations/finish_specifying.py\n@@ -26,17 +26,16 @@\n _split_missing_input_aliases_into_group_and_agg_lists(\n db_query, ix_of_summarize_transform, engine, metadata\n )\n- there_is_something_to_add = \\\n- aliases_to_be_added_to_group_by or aliases_to_be_added_to_group_by\n- if there_is_something_to_add:\n- summarize_transform = \\\n- summarize_transform.get_new_with_aliases_added_to_agg_on(\n- aliases_to_be_added_to_agg_on\n- )\n+ if aliases_to_be_added_to_group_by:\n summarize_transform = \\\n summarize_transform.get_new_with_aliases_added_to_group_by(\n aliases_to_be_added_to_group_by\n )\n+ if aliases_to_be_added_to_agg_on:\n+ summarize_transform = \\\n+ summarize_transform.get_new_with_aliases_added_to_agg_on(\n+ aliases_to_be_added_to_agg_on\n+ )\n return summarize_transform\n \n \n@@ -180,7 +179,10 @@\n \n \n def _is_sa_column_unique_constrained(sa_column):\n- return sa_column.primary_key or sa_column.unique\n+ return bool(\n+ sa_column.primary_key\n+ or sa_column.unique\n+ )\n \n \n def _get_oid_of_initial_column(initial_column):\n", "issue": "We need a way to specify Preproc function for base_grouping_column\n## Description\r\nIn partial summarization, when the `grouping_expressions` also include the `base_grouping_column`, \"guessing\" does not work.\r\n* We need a place to specify `preproc` functions for the `base_grouping_column`. We should either allow additionally specifying it within `grouping_expressions` or extend `base_grouping_column` to include `preproc`.\r\n* If we do not allow it within `grouping_expressions`, the \"guessed\" response should not include it within `grouping_expressions`. 
Currently, it is also included.\r\n\r\ncc: @dmos62 @mathemancer \n", "before_files": [{"content": "from db.tables.operations import select as tables_select\nfrom db.tables.operations.select import get_joinable_tables\nfrom db.transforms.base import Summarize, SelectSubsetOfColumns\nfrom db.columns.operations.select import get_column_from_oid_and_attnum\n\n\ndef finish_specifying_summarize_transform(\n db_query, ix_of_summarize_transform, engine, metadata\n):\n \"\"\"\n Adds missing input aliases to the summarize transform.\n\n Will find input aliases that are not mentioned in the summarize_transform and will add each\n of them to its group-by set and/or aggregate-on set.\n\n If the user selected input alias (summarize's base grouping column) is not unique-constrained,\n will put the unmentioned input aliases in the aggregation set.\n\n If the user selected input alias (summarize's base grouping column) is unique-constrained,\n then it might put at least some input aliases in the grouping set, depending on what\n _should_group_by returns.\n \"\"\"\n summarize_transform = db_query.transformations[ix_of_summarize_transform]\n assert type(summarize_transform) is Summarize\n aliases_to_be_added_to_group_by, aliases_to_be_added_to_agg_on = \\\n _split_missing_input_aliases_into_group_and_agg_lists(\n db_query, ix_of_summarize_transform, engine, metadata\n )\n there_is_something_to_add = \\\n aliases_to_be_added_to_group_by or aliases_to_be_added_to_group_by\n if there_is_something_to_add:\n summarize_transform = \\\n summarize_transform.get_new_with_aliases_added_to_agg_on(\n aliases_to_be_added_to_agg_on\n )\n summarize_transform = \\\n summarize_transform.get_new_with_aliases_added_to_group_by(\n aliases_to_be_added_to_group_by\n )\n return summarize_transform\n\n\ndef _split_missing_input_aliases_into_group_and_agg_lists(\n db_query, ix_of_summarize_transform, engine, metadata,\n):\n aliases_to_be_added_to_group_by = []\n aliases_to_be_added_to_agg_on = []\n summarize_transform = db_query.transformations[ix_of_summarize_transform]\n initial_columns_not_in_summarize = \\\n _get_initial_columns_not_in_summarize(db_query, summarize_transform)\n if not initial_columns_not_in_summarize:\n # If all input aliases for summarize transform are in the transform's group-by or\n # aggregate-on sets, there's nothing to do.\n return aliases_to_be_added_to_group_by, aliases_to_be_added_to_agg_on\n # A summarize transform has a base_grouping_column (which is an alias) around\n # which our suggestions will be based.\n base_grouping_alias = summarize_transform.base_grouping_column\n base_grouping_initial_column = \\\n _get_initial_column_by_alias(\n db_query.initial_columns, base_grouping_alias\n )\n # We'll always want user-selected alias (base_grouping_column) in the \"group-by set\";\n if base_grouping_initial_column in initial_columns_not_in_summarize:\n aliases_to_be_added_to_group_by.append(base_grouping_alias)\n initial_columns_not_in_summarize.remove(base_grouping_initial_column)\n # Most of logic in the rest of method is around whether or not we can add some of the other\n # input aliases to the \"group-by set\"; otherwise we'll put them in \"aggregate-on set\".\n can_add_other_aliases_to_group_by = (\n _is_first_alias_generating_transform(\n db_query, ix_of_summarize_transform\n )\n and _is_initial_column_unique_constrained(\n base_grouping_initial_column, engine, metadata\n )\n )\n if can_add_other_aliases_to_group_by:\n oids_of_joinable_tables_with_single_results = \\\n 
_get_oids_of_joinable_tables_with_single_results(\n db_query, engine, metadata\n )\n oid_of_base_grouping_initial_column = \\\n _get_oid_of_initial_column(base_grouping_initial_column)\n for initial_column in initial_columns_not_in_summarize:\n if _should_group_by(\n _get_oid_of_initial_column(initial_column),\n oid_of_base_grouping_initial_column,\n oids_of_joinable_tables_with_single_results,\n ):\n alias_set_to_add_to = aliases_to_be_added_to_group_by\n else:\n alias_set_to_add_to = aliases_to_be_added_to_agg_on\n alias_set_to_add_to.append(initial_column.alias)\n else:\n aliases_to_be_added_to_agg_on = list(\n initial_column.alias\n for initial_column\n in initial_columns_not_in_summarize\n )\n return aliases_to_be_added_to_group_by, aliases_to_be_added_to_agg_on\n\n\ndef _is_first_alias_generating_transform(db_query, ix_of_summarize_transform):\n \"\"\"\n Checks if the transform is the first alias-generating transform. An alias-generating transform\n means that it itroduces new aliases (columns) to the transform pipeline. We want to know when\n a given alias-generating transform is the first in the pipeline, because then we can consider\n its input aliases to be fully described by initial columns, which can be a useful\n simplification.\n \"\"\"\n prior_transforms = db_query.transformations[:ix_of_summarize_transform]\n for prior_transform in prior_transforms:\n alias_generating_transforms = {Summarize, SelectSubsetOfColumns}\n is_alias_generating = \\\n type(prior_transform) in alias_generating_transforms\n if is_alias_generating:\n return False\n return True\n\n\ndef _get_initial_columns_not_in_summarize(db_query, summarize_transform):\n initial_columns = db_query.initial_columns\n group_by_aliases = summarize_transform.grouping_input_aliases\n agg_on_aliases = summarize_transform.aggregation_input_aliases\n aliases_in_summarize = group_by_aliases + agg_on_aliases\n return [\n initial_column\n for initial_column in\n initial_columns\n if initial_column.alias not in aliases_in_summarize\n ]\n\n\ndef _get_initial_column_by_alias(initial_columns, alias):\n for initial_column in initial_columns:\n if initial_column.alias == alias:\n return initial_column\n\n\ndef _should_group_by(\n oid_of_initial_column,\n oid_of_base_grouping_group_by_col,\n oids_of_joinable_tables_with_single_results,\n):\n \"\"\"\n For the sake of efficiency, we're not checking here that base_grouping_group_by_col is unique\n constrained: it is presumed that that is the case.\n \"\"\"\n is_on_table_of_base_grouping_column = \\\n oid_of_initial_column == oid_of_base_grouping_group_by_col\n is_single_result = \\\n oid_of_initial_column in oids_of_joinable_tables_with_single_results\n should_group_by = \\\n is_on_table_of_base_grouping_column or is_single_result\n return should_group_by\n\n\ndef _get_oids_of_joinable_tables_with_single_results(\n db_query, engine, metadata,\n):\n joinable_tables = \\\n get_joinable_tables(engine, metadata, db_query.base_table_oid)\n return set(\n _get_oid_of_joinable_table(joinable_table)\n for joinable_table\n in joinable_tables\n if _has_single_result(joinable_table)\n )\n\n\ndef _is_initial_column_unique_constrained(initial_column, engine, metadata):\n oid = _get_oid_of_initial_column(initial_column)\n attnum = initial_column.attnum\n sa_column = get_column_from_oid_and_attnum(\n table_oid=oid,\n attnum=attnum,\n engine=engine,\n metadata=metadata,\n )\n return _is_sa_column_unique_constrained(sa_column)\n\n\ndef _is_sa_column_unique_constrained(sa_column):\n return 
sa_column.primary_key or sa_column.unique\n\n\ndef _get_oid_of_initial_column(initial_column):\n return initial_column.reloid\n\n\ndef _get_oid_of_joinable_table(joinable_table):\n joinable_table_oid = joinable_table[tables_select.TARGET]\n return joinable_table_oid\n\n\ndef _has_single_result(joinable_table):\n has_multiple_results = joinable_table[tables_select.MULTIPLE_RESULTS]\n assert type(has_multiple_results) is bool\n return not has_multiple_results\n", "path": "db/transforms/operations/finish_specifying.py"}]}
2,932
325
gh_patches_debug_42014
rasdani/github-patches
git_diff
goauthentik__authentik-5812
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Backchannel LDAP providers missing from outpost configuration page **Description** When creating an LDAP Provider, it will appear in the Outpost create/configuration page if it is assigned an application, but not if it's assigned as an application backchannel. This means we're unable to associate an LDAP backchannel to an outpost. **To Reproduce** Steps to reproduce the behavior: 1. Create a demo Application and an LDAP Provider 2. Assign the LDAP Provider as a _backchannel provider_ to the application. 3. Attempt to create an LDAP outpost. Notice the LDAP provider does not appear on the selectable list. **Expected behavior** The backchannel should appear the outposts "applications" list, so that it may be added to the outpost. **Version and Deployment (please complete the following information):** - authentik version: 2023.5.2 - Deployment: docker-compose </issue> <code> [start of authentik/providers/ldap/api.py] 1 """LDAPProvider API Views""" 2 from rest_framework.fields import CharField, ListField, SerializerMethodField 3 from rest_framework.serializers import ModelSerializer 4 from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet 5 6 from authentik.core.api.providers import ProviderSerializer 7 from authentik.core.api.used_by import UsedByMixin 8 from authentik.providers.ldap.models import LDAPProvider 9 10 11 class LDAPProviderSerializer(ProviderSerializer): 12 """LDAPProvider Serializer""" 13 14 outpost_set = ListField(child=CharField(), read_only=True, source="outpost_set.all") 15 16 class Meta: 17 model = LDAPProvider 18 fields = ProviderSerializer.Meta.fields + [ 19 "base_dn", 20 "search_group", 21 "certificate", 22 "tls_server_name", 23 "uid_start_number", 24 "gid_start_number", 25 "outpost_set", 26 "search_mode", 27 "bind_mode", 28 ] 29 extra_kwargs = ProviderSerializer.Meta.extra_kwargs 30 31 32 class LDAPProviderViewSet(UsedByMixin, ModelViewSet): 33 """LDAPProvider Viewset""" 34 35 queryset = LDAPProvider.objects.all() 36 serializer_class = LDAPProviderSerializer 37 filterset_fields = { 38 "application": ["isnull"], 39 "name": ["iexact"], 40 "authorization_flow__slug": ["iexact"], 41 "base_dn": ["iexact"], 42 "search_group__group_uuid": ["iexact"], 43 "search_group__name": ["iexact"], 44 "certificate__kp_uuid": ["iexact"], 45 "certificate__name": ["iexact"], 46 "tls_server_name": ["iexact"], 47 "uid_start_number": ["iexact"], 48 "gid_start_number": ["iexact"], 49 } 50 search_fields = ["name"] 51 ordering = ["name"] 52 53 54 class LDAPOutpostConfigSerializer(ModelSerializer): 55 """LDAPProvider Serializer""" 56 57 application_slug = SerializerMethodField() 58 bind_flow_slug = CharField(source="authorization_flow.slug") 59 60 def get_application_slug(self, instance: LDAPProvider) -> str: 61 """Prioritise backchannel slug over direct application slug""" 62 if instance.backchannel_application: 63 return instance.backchannel_application.slug 64 return instance.application.slug 65 66 class Meta: 67 model = LDAPProvider 68 fields = [ 69 "pk", 70 "name", 71 "base_dn", 72 "bind_flow_slug", 73 "application_slug", 74 "search_group", 75 "certificate", 76 "tls_server_name", 77 "uid_start_number", 78 "gid_start_number", 79 "search_mode", 80 "bind_mode", 81 ] 82 83 84 class LDAPOutpostConfigViewSet(ReadOnlyModelViewSet): 85 """LDAPProvider Viewset""" 86 87 queryset = LDAPProvider.objects.filter(application__isnull=False) 88 serializer_class = 
LDAPOutpostConfigSerializer 89 ordering = ["name"] 90 search_fields = ["name"] 91 filterset_fields = ["name"] 92 [end of authentik/providers/ldap/api.py] [start of authentik/core/api/providers.py] 1 """Provider API Views""" 2 from django.utils.translation import gettext_lazy as _ 3 from django_filters.filters import BooleanFilter 4 from django_filters.filterset import FilterSet 5 from drf_spectacular.utils import extend_schema 6 from rest_framework import mixins 7 from rest_framework.decorators import action 8 from rest_framework.fields import ReadOnlyField 9 from rest_framework.request import Request 10 from rest_framework.response import Response 11 from rest_framework.serializers import ModelSerializer, SerializerMethodField 12 from rest_framework.viewsets import GenericViewSet 13 14 from authentik.core.api.used_by import UsedByMixin 15 from authentik.core.api.utils import MetaNameSerializer, TypeCreateSerializer 16 from authentik.core.models import Provider 17 from authentik.lib.utils.reflection import all_subclasses 18 19 20 class ProviderSerializer(ModelSerializer, MetaNameSerializer): 21 """Provider Serializer""" 22 23 assigned_application_slug = ReadOnlyField(source="application.slug") 24 assigned_application_name = ReadOnlyField(source="application.name") 25 assigned_backchannel_application_slug = ReadOnlyField(source="backchannel_application.slug") 26 assigned_backchannel_application_name = ReadOnlyField(source="backchannel_application.name") 27 28 component = SerializerMethodField() 29 30 def get_component(self, obj: Provider) -> str: # pragma: no cover 31 """Get object component so that we know how to edit the object""" 32 if obj.__class__ == Provider: 33 return "" 34 return obj.component 35 36 class Meta: 37 model = Provider 38 fields = [ 39 "pk", 40 "name", 41 "authentication_flow", 42 "authorization_flow", 43 "property_mappings", 44 "component", 45 "assigned_application_slug", 46 "assigned_application_name", 47 "assigned_backchannel_application_slug", 48 "assigned_backchannel_application_name", 49 "verbose_name", 50 "verbose_name_plural", 51 "meta_model_name", 52 ] 53 extra_kwargs = { 54 "authorization_flow": {"required": True, "allow_null": False}, 55 } 56 57 58 class ProviderFilter(FilterSet): 59 """Filter for groups""" 60 61 application__isnull = BooleanFilter( 62 field_name="application", 63 lookup_expr="isnull", 64 ) 65 backchannel_only = BooleanFilter( 66 method="filter_backchannel_only", 67 ) 68 69 def filter_backchannel_only(self, queryset, name, value): 70 """Only return backchannel providers""" 71 return queryset.filter(is_backchannel=value) 72 73 74 class ProviderViewSet( 75 mixins.RetrieveModelMixin, 76 mixins.DestroyModelMixin, 77 UsedByMixin, 78 mixins.ListModelMixin, 79 GenericViewSet, 80 ): 81 """Provider Viewset""" 82 83 queryset = Provider.objects.none() 84 serializer_class = ProviderSerializer 85 filterset_class = ProviderFilter 86 search_fields = [ 87 "name", 88 "application__name", 89 ] 90 91 def get_queryset(self): # pragma: no cover 92 return Provider.objects.select_subclasses() 93 94 @extend_schema(responses={200: TypeCreateSerializer(many=True)}) 95 @action(detail=False, pagination_class=None, filter_backends=[]) 96 def types(self, request: Request) -> Response: 97 """Get all creatable provider types""" 98 data = [] 99 for subclass in all_subclasses(self.queryset.model): 100 subclass: Provider 101 if subclass._meta.abstract: 102 continue 103 data.append( 104 { 105 "name": subclass._meta.verbose_name, 106 "description": subclass.__doc__, 107 
"component": subclass().component, 108 "model_name": subclass._meta.model_name, 109 } 110 ) 111 data.append( 112 { 113 "name": _("SAML Provider from Metadata"), 114 "description": _("Create a SAML Provider by importing its Metadata."), 115 "component": "ak-provider-saml-import-form", 116 "model_name": "", 117 } 118 ) 119 return Response(TypeCreateSerializer(data, many=True).data) 120 [end of authentik/core/api/providers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/authentik/core/api/providers.py b/authentik/core/api/providers.py --- a/authentik/core/api/providers.py +++ b/authentik/core/api/providers.py @@ -1,4 +1,6 @@ """Provider API Views""" +from django.db.models import QuerySet +from django.db.models.query import Q from django.utils.translation import gettext_lazy as _ from django_filters.filters import BooleanFilter from django_filters.filterset import FilterSet @@ -56,17 +58,22 @@ class ProviderFilter(FilterSet): - """Filter for groups""" + """Filter for providers""" - application__isnull = BooleanFilter( - field_name="application", - lookup_expr="isnull", - ) + application__isnull = BooleanFilter(method="filter_application__isnull") backchannel_only = BooleanFilter( method="filter_backchannel_only", ) - def filter_backchannel_only(self, queryset, name, value): + def filter_application__isnull(self, queryset: QuerySet, name, value): + """Only return providers that are neither assigned to application, + both as provider or application provider""" + return queryset.filter( + Q(backchannel_application__isnull=value, is_backchannel=True) + | Q(application__isnull=value) + ) + + def filter_backchannel_only(self, queryset: QuerySet, name, value): """Only return backchannel providers""" return queryset.filter(is_backchannel=value) diff --git a/authentik/providers/ldap/api.py b/authentik/providers/ldap/api.py --- a/authentik/providers/ldap/api.py +++ b/authentik/providers/ldap/api.py @@ -1,4 +1,8 @@ """LDAPProvider API Views""" +from django.db.models import QuerySet +from django.db.models.query import Q +from django_filters.filters import BooleanFilter +from django_filters.filterset import FilterSet from rest_framework.fields import CharField, ListField, SerializerMethodField from rest_framework.serializers import ModelSerializer from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet @@ -29,24 +33,41 @@ extra_kwargs = ProviderSerializer.Meta.extra_kwargs +class LDAPProviderFilter(FilterSet): + """LDAP Provider filters""" + + application__isnull = BooleanFilter(method="filter_application__isnull") + + def filter_application__isnull(self, queryset: QuerySet, name, value): + """Only return providers that are neither assigned to application, + both as provider or application provider""" + return queryset.filter( + Q(backchannel_application__isnull=value) | Q(application__isnull=value) + ) + + class Meta: + model = LDAPProvider + fields = { + "application": ["isnull"], + "name": ["iexact"], + "authorization_flow__slug": ["iexact"], + "base_dn": ["iexact"], + "search_group__group_uuid": ["iexact"], + "search_group__name": ["iexact"], + "certificate__kp_uuid": ["iexact"], + "certificate__name": ["iexact"], + "tls_server_name": ["iexact"], + "uid_start_number": ["iexact"], + "gid_start_number": ["iexact"], + } + + class LDAPProviderViewSet(UsedByMixin, ModelViewSet): """LDAPProvider Viewset""" queryset = LDAPProvider.objects.all() serializer_class = LDAPProviderSerializer - filterset_fields = { - "application": ["isnull"], - "name": ["iexact"], - "authorization_flow__slug": ["iexact"], - "base_dn": ["iexact"], - "search_group__group_uuid": ["iexact"], - "search_group__name": ["iexact"], - "certificate__kp_uuid": ["iexact"], - "certificate__name": ["iexact"], - "tls_server_name": ["iexact"], - "uid_start_number": ["iexact"], - "gid_start_number": ["iexact"], - } + filterset_class = LDAPProviderFilter search_fields = ["name"] ordering = ["name"]
{"golden_diff": "diff --git a/authentik/core/api/providers.py b/authentik/core/api/providers.py\n--- a/authentik/core/api/providers.py\n+++ b/authentik/core/api/providers.py\n@@ -1,4 +1,6 @@\n \"\"\"Provider API Views\"\"\"\n+from django.db.models import QuerySet\n+from django.db.models.query import Q\n from django.utils.translation import gettext_lazy as _\n from django_filters.filters import BooleanFilter\n from django_filters.filterset import FilterSet\n@@ -56,17 +58,22 @@\n \n \n class ProviderFilter(FilterSet):\n- \"\"\"Filter for groups\"\"\"\n+ \"\"\"Filter for providers\"\"\"\n \n- application__isnull = BooleanFilter(\n- field_name=\"application\",\n- lookup_expr=\"isnull\",\n- )\n+ application__isnull = BooleanFilter(method=\"filter_application__isnull\")\n backchannel_only = BooleanFilter(\n method=\"filter_backchannel_only\",\n )\n \n- def filter_backchannel_only(self, queryset, name, value):\n+ def filter_application__isnull(self, queryset: QuerySet, name, value):\n+ \"\"\"Only return providers that are neither assigned to application,\n+ both as provider or application provider\"\"\"\n+ return queryset.filter(\n+ Q(backchannel_application__isnull=value, is_backchannel=True)\n+ | Q(application__isnull=value)\n+ )\n+\n+ def filter_backchannel_only(self, queryset: QuerySet, name, value):\n \"\"\"Only return backchannel providers\"\"\"\n return queryset.filter(is_backchannel=value)\n \ndiff --git a/authentik/providers/ldap/api.py b/authentik/providers/ldap/api.py\n--- a/authentik/providers/ldap/api.py\n+++ b/authentik/providers/ldap/api.py\n@@ -1,4 +1,8 @@\n \"\"\"LDAPProvider API Views\"\"\"\n+from django.db.models import QuerySet\n+from django.db.models.query import Q\n+from django_filters.filters import BooleanFilter\n+from django_filters.filterset import FilterSet\n from rest_framework.fields import CharField, ListField, SerializerMethodField\n from rest_framework.serializers import ModelSerializer\n from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet\n@@ -29,24 +33,41 @@\n extra_kwargs = ProviderSerializer.Meta.extra_kwargs\n \n \n+class LDAPProviderFilter(FilterSet):\n+ \"\"\"LDAP Provider filters\"\"\"\n+\n+ application__isnull = BooleanFilter(method=\"filter_application__isnull\")\n+\n+ def filter_application__isnull(self, queryset: QuerySet, name, value):\n+ \"\"\"Only return providers that are neither assigned to application,\n+ both as provider or application provider\"\"\"\n+ return queryset.filter(\n+ Q(backchannel_application__isnull=value) | Q(application__isnull=value)\n+ )\n+\n+ class Meta:\n+ model = LDAPProvider\n+ fields = {\n+ \"application\": [\"isnull\"],\n+ \"name\": [\"iexact\"],\n+ \"authorization_flow__slug\": [\"iexact\"],\n+ \"base_dn\": [\"iexact\"],\n+ \"search_group__group_uuid\": [\"iexact\"],\n+ \"search_group__name\": [\"iexact\"],\n+ \"certificate__kp_uuid\": [\"iexact\"],\n+ \"certificate__name\": [\"iexact\"],\n+ \"tls_server_name\": [\"iexact\"],\n+ \"uid_start_number\": [\"iexact\"],\n+ \"gid_start_number\": [\"iexact\"],\n+ }\n+\n+\n class LDAPProviderViewSet(UsedByMixin, ModelViewSet):\n \"\"\"LDAPProvider Viewset\"\"\"\n \n queryset = LDAPProvider.objects.all()\n serializer_class = LDAPProviderSerializer\n- filterset_fields = {\n- \"application\": [\"isnull\"],\n- \"name\": [\"iexact\"],\n- \"authorization_flow__slug\": [\"iexact\"],\n- \"base_dn\": [\"iexact\"],\n- \"search_group__group_uuid\": [\"iexact\"],\n- \"search_group__name\": [\"iexact\"],\n- \"certificate__kp_uuid\": [\"iexact\"],\n- \"certificate__name\": 
[\"iexact\"],\n- \"tls_server_name\": [\"iexact\"],\n- \"uid_start_number\": [\"iexact\"],\n- \"gid_start_number\": [\"iexact\"],\n- }\n+ filterset_class = LDAPProviderFilter\n search_fields = [\"name\"]\n ordering = [\"name\"]\n", "issue": "Backchannel LDAP providers missing from outpost configuration page\n**Description**\r\nWhen creating an LDAP Provider, it will appear in the Outpost create/configuration page if it is assigned an application, but not if it's assigned as an application backchannel.\r\n\r\nThis means we're unable to associate an LDAP backchannel to an outpost.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Create a demo Application and an LDAP Provider\r\n2. Assign the LDAP Provider as a _backchannel provider_ to the application.\r\n3. Attempt to create an LDAP outpost. Notice the LDAP provider does not appear on the selectable list.\r\n\r\n**Expected behavior**\r\nThe backchannel should appear the outposts \"applications\" list, so that it may be added to the outpost.\r\n\r\n**Version and Deployment (please complete the following information):**\r\n- authentik version: 2023.5.2\r\n- Deployment: docker-compose\r\n\n", "before_files": [{"content": "\"\"\"LDAPProvider API Views\"\"\"\nfrom rest_framework.fields import CharField, ListField, SerializerMethodField\nfrom rest_framework.serializers import ModelSerializer\nfrom rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet\n\nfrom authentik.core.api.providers import ProviderSerializer\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.providers.ldap.models import LDAPProvider\n\n\nclass LDAPProviderSerializer(ProviderSerializer):\n \"\"\"LDAPProvider Serializer\"\"\"\n\n outpost_set = ListField(child=CharField(), read_only=True, source=\"outpost_set.all\")\n\n class Meta:\n model = LDAPProvider\n fields = ProviderSerializer.Meta.fields + [\n \"base_dn\",\n \"search_group\",\n \"certificate\",\n \"tls_server_name\",\n \"uid_start_number\",\n \"gid_start_number\",\n \"outpost_set\",\n \"search_mode\",\n \"bind_mode\",\n ]\n extra_kwargs = ProviderSerializer.Meta.extra_kwargs\n\n\nclass LDAPProviderViewSet(UsedByMixin, ModelViewSet):\n \"\"\"LDAPProvider Viewset\"\"\"\n\n queryset = LDAPProvider.objects.all()\n serializer_class = LDAPProviderSerializer\n filterset_fields = {\n \"application\": [\"isnull\"],\n \"name\": [\"iexact\"],\n \"authorization_flow__slug\": [\"iexact\"],\n \"base_dn\": [\"iexact\"],\n \"search_group__group_uuid\": [\"iexact\"],\n \"search_group__name\": [\"iexact\"],\n \"certificate__kp_uuid\": [\"iexact\"],\n \"certificate__name\": [\"iexact\"],\n \"tls_server_name\": [\"iexact\"],\n \"uid_start_number\": [\"iexact\"],\n \"gid_start_number\": [\"iexact\"],\n }\n search_fields = [\"name\"]\n ordering = [\"name\"]\n\n\nclass LDAPOutpostConfigSerializer(ModelSerializer):\n \"\"\"LDAPProvider Serializer\"\"\"\n\n application_slug = SerializerMethodField()\n bind_flow_slug = CharField(source=\"authorization_flow.slug\")\n\n def get_application_slug(self, instance: LDAPProvider) -> str:\n \"\"\"Prioritise backchannel slug over direct application slug\"\"\"\n if instance.backchannel_application:\n return instance.backchannel_application.slug\n return instance.application.slug\n\n class Meta:\n model = LDAPProvider\n fields = [\n \"pk\",\n \"name\",\n \"base_dn\",\n \"bind_flow_slug\",\n \"application_slug\",\n \"search_group\",\n \"certificate\",\n \"tls_server_name\",\n \"uid_start_number\",\n \"gid_start_number\",\n \"search_mode\",\n \"bind_mode\",\n 
]\n\n\nclass LDAPOutpostConfigViewSet(ReadOnlyModelViewSet):\n \"\"\"LDAPProvider Viewset\"\"\"\n\n queryset = LDAPProvider.objects.filter(application__isnull=False)\n serializer_class = LDAPOutpostConfigSerializer\n ordering = [\"name\"]\n search_fields = [\"name\"]\n filterset_fields = [\"name\"]\n", "path": "authentik/providers/ldap/api.py"}, {"content": "\"\"\"Provider API Views\"\"\"\nfrom django.utils.translation import gettext_lazy as _\nfrom django_filters.filters import BooleanFilter\nfrom django_filters.filterset import FilterSet\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\nfrom rest_framework.fields import ReadOnlyField\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ModelSerializer, SerializerMethodField\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.core.api.utils import MetaNameSerializer, TypeCreateSerializer\nfrom authentik.core.models import Provider\nfrom authentik.lib.utils.reflection import all_subclasses\n\n\nclass ProviderSerializer(ModelSerializer, MetaNameSerializer):\n \"\"\"Provider Serializer\"\"\"\n\n assigned_application_slug = ReadOnlyField(source=\"application.slug\")\n assigned_application_name = ReadOnlyField(source=\"application.name\")\n assigned_backchannel_application_slug = ReadOnlyField(source=\"backchannel_application.slug\")\n assigned_backchannel_application_name = ReadOnlyField(source=\"backchannel_application.name\")\n\n component = SerializerMethodField()\n\n def get_component(self, obj: Provider) -> str: # pragma: no cover\n \"\"\"Get object component so that we know how to edit the object\"\"\"\n if obj.__class__ == Provider:\n return \"\"\n return obj.component\n\n class Meta:\n model = Provider\n fields = [\n \"pk\",\n \"name\",\n \"authentication_flow\",\n \"authorization_flow\",\n \"property_mappings\",\n \"component\",\n \"assigned_application_slug\",\n \"assigned_application_name\",\n \"assigned_backchannel_application_slug\",\n \"assigned_backchannel_application_name\",\n \"verbose_name\",\n \"verbose_name_plural\",\n \"meta_model_name\",\n ]\n extra_kwargs = {\n \"authorization_flow\": {\"required\": True, \"allow_null\": False},\n }\n\n\nclass ProviderFilter(FilterSet):\n \"\"\"Filter for groups\"\"\"\n\n application__isnull = BooleanFilter(\n field_name=\"application\",\n lookup_expr=\"isnull\",\n )\n backchannel_only = BooleanFilter(\n method=\"filter_backchannel_only\",\n )\n\n def filter_backchannel_only(self, queryset, name, value):\n \"\"\"Only return backchannel providers\"\"\"\n return queryset.filter(is_backchannel=value)\n\n\nclass ProviderViewSet(\n mixins.RetrieveModelMixin,\n mixins.DestroyModelMixin,\n UsedByMixin,\n mixins.ListModelMixin,\n GenericViewSet,\n):\n \"\"\"Provider Viewset\"\"\"\n\n queryset = Provider.objects.none()\n serializer_class = ProviderSerializer\n filterset_class = ProviderFilter\n search_fields = [\n \"name\",\n \"application__name\",\n ]\n\n def get_queryset(self): # pragma: no cover\n return Provider.objects.select_subclasses()\n\n @extend_schema(responses={200: TypeCreateSerializer(many=True)})\n @action(detail=False, pagination_class=None, filter_backends=[])\n def types(self, request: Request) -> Response:\n \"\"\"Get all creatable provider types\"\"\"\n data = []\n for subclass in all_subclasses(self.queryset.model):\n subclass: Provider\n if 
subclass._meta.abstract:\n continue\n data.append(\n {\n \"name\": subclass._meta.verbose_name,\n \"description\": subclass.__doc__,\n \"component\": subclass().component,\n \"model_name\": subclass._meta.model_name,\n }\n )\n data.append(\n {\n \"name\": _(\"SAML Provider from Metadata\"),\n \"description\": _(\"Create a SAML Provider by importing its Metadata.\"),\n \"component\": \"ak-provider-saml-import-form\",\n \"model_name\": \"\",\n }\n )\n return Response(TypeCreateSerializer(data, many=True).data)\n", "path": "authentik/core/api/providers.py"}]}
2,571
923
gh_patches_debug_18903
rasdani/github-patches
git_diff
docker__docker-py-1682
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Check resource error in container network API ``` docker python client v2.4.2 python v2.7.12 docker v17.03.1-ce Ubuntu 16.04 ``` PR #1649 updated the `check_resource` decorator to handle different resource names. Container network API functions `connect_container_to_network()` and `disconnect_container_from_network()` check 'image' as resource ID and not 'container'. Reproduce using the following snippet: ```python import docker cli = docker.APIClient(base_url='unix:///var/run/docker.sock') cli.pull(repository='ubuntu', tag='latest') name = 'my_ubuntu' container = cli.create_container(image='ubuntu:latest', name=name) cli.connect_container_to_network(container=name, net_id='bridge') ``` This causes: ``` Traceback (most recent call last): File "test.py", line 8, in <module> cli.connect_container_to_network(container=name, net_id='bridge') File "/home/mberry/scratch/virtualenv/docker_py/local/lib/python2.7/site-packages/docker/utils/decorators.py", line 17, in wrapped 'Resource ID was not provided' docker.errors.NullResource: Resource ID was not provided ``` Check resource error in container network API ``` docker python client v2.4.2 python v2.7.12 docker v17.03.1-ce Ubuntu 16.04 ``` PR #1649 updated the `check_resource` decorator to handle different resource names. Container network API functions `connect_container_to_network()` and `disconnect_container_from_network()` check 'image' as resource ID and not 'container'. Reproduce using the following snippet: ```python import docker cli = docker.APIClient(base_url='unix:///var/run/docker.sock') cli.pull(repository='ubuntu', tag='latest') name = 'my_ubuntu' container = cli.create_container(image='ubuntu:latest', name=name) cli.connect_container_to_network(container=name, net_id='bridge') ``` This causes: ``` Traceback (most recent call last): File "test.py", line 8, in <module> cli.connect_container_to_network(container=name, net_id='bridge') File "/home/mberry/scratch/virtualenv/docker_py/local/lib/python2.7/site-packages/docker/utils/decorators.py", line 17, in wrapped 'Resource ID was not provided' docker.errors.NullResource: Resource ID was not provided ``` </issue> <code> [start of docker/api/network.py] 1 from ..errors import InvalidVersion 2 from ..utils import check_resource, minimum_version 3 from ..utils import version_lt 4 from .. import utils 5 6 7 class NetworkApiMixin(object): 8 @minimum_version('1.21') 9 def networks(self, names=None, ids=None, filters=None): 10 """ 11 List networks. Similar to the ``docker networks ls`` command. 12 13 Args: 14 names (:py:class:`list`): List of names to filter by 15 ids (:py:class:`list`): List of ids to filter by 16 filters (dict): Filters to be processed on the network list. 17 Available filters: 18 - ``driver=[<driver-name>]`` Matches a network's driver. 19 - ``label=[<key>]`` or ``label=[<key>=<value>]``. 20 - ``type=["custom"|"builtin"]`` Filters networks by type. 21 22 Returns: 23 (dict): List of network objects. 24 25 Raises: 26 :py:class:`docker.errors.APIError` 27 If the server returns an error. 
28 """ 29 30 if filters is None: 31 filters = {} 32 if names: 33 filters['name'] = names 34 if ids: 35 filters['id'] = ids 36 params = {'filters': utils.convert_filters(filters)} 37 url = self._url("/networks") 38 res = self._get(url, params=params) 39 return self._result(res, json=True) 40 41 @minimum_version('1.21') 42 def create_network(self, name, driver=None, options=None, ipam=None, 43 check_duplicate=None, internal=False, labels=None, 44 enable_ipv6=False, attachable=None, scope=None, 45 ingress=None): 46 """ 47 Create a network. Similar to the ``docker network create``. 48 49 Args: 50 name (str): Name of the network 51 driver (str): Name of the driver used to create the network 52 options (dict): Driver options as a key-value dictionary 53 ipam (IPAMConfig): Optional custom IP scheme for the network. 54 check_duplicate (bool): Request daemon to check for networks with 55 same name. Default: ``True``. 56 internal (bool): Restrict external access to the network. Default 57 ``False``. 58 labels (dict): Map of labels to set on the network. Default 59 ``None``. 60 enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``. 61 attachable (bool): If enabled, and the network is in the global 62 scope, non-service containers on worker nodes will be able to 63 connect to the network. 64 ingress (bool): If set, create an ingress network which provides 65 the routing-mesh in swarm mode. 66 67 Returns: 68 (dict): The created network reference object 69 70 Raises: 71 :py:class:`docker.errors.APIError` 72 If the server returns an error. 73 74 Example: 75 A network using the bridge driver: 76 77 >>> client.create_network("network1", driver="bridge") 78 79 You can also create more advanced networks with custom IPAM 80 configurations. For example, setting the subnet to 81 ``192.168.52.0/24`` and gateway address to ``192.168.52.254``. 82 83 .. 
code-block:: python 84 85 >>> ipam_pool = docker.types.IPAMPool( 86 subnet='192.168.52.0/24', 87 gateway='192.168.52.254' 88 ) 89 >>> ipam_config = docker.types.IPAMConfig( 90 pool_configs=[ipam_pool] 91 ) 92 >>> docker_client.create_network("network1", driver="bridge", 93 ipam=ipam_config) 94 """ 95 if options is not None and not isinstance(options, dict): 96 raise TypeError('options must be a dictionary') 97 98 data = { 99 'Name': name, 100 'Driver': driver, 101 'Options': options, 102 'IPAM': ipam, 103 'CheckDuplicate': check_duplicate, 104 } 105 106 if labels is not None: 107 if version_lt(self._version, '1.23'): 108 raise InvalidVersion( 109 'network labels were introduced in API 1.23' 110 ) 111 if not isinstance(labels, dict): 112 raise TypeError('labels must be a dictionary') 113 data["Labels"] = labels 114 115 if enable_ipv6: 116 if version_lt(self._version, '1.23'): 117 raise InvalidVersion( 118 'enable_ipv6 was introduced in API 1.23' 119 ) 120 data['EnableIPv6'] = True 121 122 if internal: 123 if version_lt(self._version, '1.22'): 124 raise InvalidVersion('Internal networks are not ' 125 'supported in API version < 1.22') 126 data['Internal'] = True 127 128 if attachable is not None: 129 if version_lt(self._version, '1.24'): 130 raise InvalidVersion( 131 'attachable is not supported in API version < 1.24' 132 ) 133 data['Attachable'] = attachable 134 135 if ingress is not None: 136 if version_lt(self._version, '1.29'): 137 raise InvalidVersion( 138 'ingress is not supported in API version < 1.29' 139 ) 140 141 data['Ingress'] = ingress 142 143 url = self._url("/networks/create") 144 res = self._post_json(url, data=data) 145 return self._result(res, json=True) 146 147 @minimum_version('1.25') 148 def prune_networks(self, filters=None): 149 """ 150 Delete unused networks 151 152 Args: 153 filters (dict): Filters to process on the prune list. 154 155 Returns: 156 (dict): A dict containing a list of deleted network names and 157 the amount of disk space reclaimed in bytes. 158 159 Raises: 160 :py:class:`docker.errors.APIError` 161 If the server returns an error. 162 """ 163 params = {} 164 if filters: 165 params['filters'] = utils.convert_filters(filters) 166 url = self._url('/networks/prune') 167 return self._result(self._post(url, params=params), True) 168 169 @minimum_version('1.21') 170 @check_resource('net_id') 171 def remove_network(self, net_id): 172 """ 173 Remove a network. Similar to the ``docker network rm`` command. 174 175 Args: 176 net_id (str): The network's id 177 """ 178 url = self._url("/networks/{0}", net_id) 179 res = self._delete(url) 180 self._raise_for_status(res) 181 182 @minimum_version('1.21') 183 @check_resource('net_id') 184 def inspect_network(self, net_id, verbose=None): 185 """ 186 Get detailed information about a network. 187 188 Args: 189 net_id (str): ID of network 190 verbose (bool): Show the service details across the cluster in 191 swarm mode. 192 """ 193 params = {} 194 if verbose is not None: 195 if version_lt(self._version, '1.28'): 196 raise InvalidVersion('verbose was introduced in API 1.28') 197 params['verbose'] = verbose 198 199 url = self._url("/networks/{0}", net_id) 200 res = self._get(url, params=params) 201 return self._result(res, json=True) 202 203 @check_resource('image') 204 @minimum_version('1.21') 205 def connect_container_to_network(self, container, net_id, 206 ipv4_address=None, ipv6_address=None, 207 aliases=None, links=None, 208 link_local_ips=None): 209 """ 210 Connect a container to a network. 
211 212 Args: 213 container (str): container-id/name to be connected to the network 214 net_id (str): network id 215 aliases (:py:class:`list`): A list of aliases for this endpoint. 216 Names in that list can be used within the network to reach the 217 container. Defaults to ``None``. 218 links (:py:class:`list`): A list of links for this endpoint. 219 Containers declared in this list will be linked to this 220 container. Defaults to ``None``. 221 ipv4_address (str): The IP address of this container on the 222 network, using the IPv4 protocol. Defaults to ``None``. 223 ipv6_address (str): The IP address of this container on the 224 network, using the IPv6 protocol. Defaults to ``None``. 225 link_local_ips (:py:class:`list`): A list of link-local 226 (IPv4/IPv6) addresses. 227 """ 228 data = { 229 "Container": container, 230 "EndpointConfig": self.create_endpoint_config( 231 aliases=aliases, links=links, ipv4_address=ipv4_address, 232 ipv6_address=ipv6_address, link_local_ips=link_local_ips 233 ), 234 } 235 236 url = self._url("/networks/{0}/connect", net_id) 237 res = self._post_json(url, data=data) 238 self._raise_for_status(res) 239 240 @check_resource('image') 241 @minimum_version('1.21') 242 def disconnect_container_from_network(self, container, net_id, 243 force=False): 244 """ 245 Disconnect a container from a network. 246 247 Args: 248 container (str): container ID or name to be disconnected from the 249 network 250 net_id (str): network ID 251 force (bool): Force the container to disconnect from a network. 252 Default: ``False`` 253 """ 254 data = {"Container": container} 255 if force: 256 if version_lt(self._version, '1.22'): 257 raise InvalidVersion( 258 'Forced disconnect was introduced in API 1.22' 259 ) 260 data['Force'] = force 261 url = self._url("/networks/{0}/disconnect", net_id) 262 res = self._post_json(url, data=data) 263 self._raise_for_status(res) 264 [end of docker/api/network.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docker/api/network.py b/docker/api/network.py --- a/docker/api/network.py +++ b/docker/api/network.py @@ -200,7 +200,7 @@ res = self._get(url, params=params) return self._result(res, json=True) - @check_resource('image') + @check_resource('container') @minimum_version('1.21') def connect_container_to_network(self, container, net_id, ipv4_address=None, ipv6_address=None, @@ -237,7 +237,7 @@ res = self._post_json(url, data=data) self._raise_for_status(res) - @check_resource('image') + @check_resource('container') @minimum_version('1.21') def disconnect_container_from_network(self, container, net_id, force=False):
{"golden_diff": "diff --git a/docker/api/network.py b/docker/api/network.py\n--- a/docker/api/network.py\n+++ b/docker/api/network.py\n@@ -200,7 +200,7 @@\n res = self._get(url, params=params)\n return self._result(res, json=True)\n \n- @check_resource('image')\n+ @check_resource('container')\n @minimum_version('1.21')\n def connect_container_to_network(self, container, net_id,\n ipv4_address=None, ipv6_address=None,\n@@ -237,7 +237,7 @@\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n \n- @check_resource('image')\n+ @check_resource('container')\n @minimum_version('1.21')\n def disconnect_container_from_network(self, container, net_id,\n force=False):\n", "issue": "Check resource error in container network API\n```\r\ndocker python client v2.4.2\r\npython v2.7.12\r\ndocker v17.03.1-ce\r\nUbuntu 16.04\r\n```\r\n\r\nPR #1649 updated the `check_resource` decorator to handle different resource names. Container network API functions `connect_container_to_network()` and `disconnect_container_from_network()` check 'image' as resource ID and not 'container'.\r\n\r\nReproduce using the following snippet:\r\n```python\r\nimport docker\r\n\r\ncli = docker.APIClient(base_url='unix:///var/run/docker.sock')\r\ncli.pull(repository='ubuntu', tag='latest')\r\n\r\nname = 'my_ubuntu'\r\ncontainer = cli.create_container(image='ubuntu:latest', name=name)\r\ncli.connect_container_to_network(container=name, net_id='bridge')\r\n```\r\n\r\nThis causes:\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 8, in <module>\r\n cli.connect_container_to_network(container=name, net_id='bridge')\r\n File \"/home/mberry/scratch/virtualenv/docker_py/local/lib/python2.7/site-packages/docker/utils/decorators.py\", line 17, in wrapped\r\n 'Resource ID was not provided'\r\ndocker.errors.NullResource: Resource ID was not provided\r\n```\nCheck resource error in container network API\n```\r\ndocker python client v2.4.2\r\npython v2.7.12\r\ndocker v17.03.1-ce\r\nUbuntu 16.04\r\n```\r\n\r\nPR #1649 updated the `check_resource` decorator to handle different resource names. Container network API functions `connect_container_to_network()` and `disconnect_container_from_network()` check 'image' as resource ID and not 'container'.\r\n\r\nReproduce using the following snippet:\r\n```python\r\nimport docker\r\n\r\ncli = docker.APIClient(base_url='unix:///var/run/docker.sock')\r\ncli.pull(repository='ubuntu', tag='latest')\r\n\r\nname = 'my_ubuntu'\r\ncontainer = cli.create_container(image='ubuntu:latest', name=name)\r\ncli.connect_container_to_network(container=name, net_id='bridge')\r\n```\r\n\r\nThis causes:\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 8, in <module>\r\n cli.connect_container_to_network(container=name, net_id='bridge')\r\n File \"/home/mberry/scratch/virtualenv/docker_py/local/lib/python2.7/site-packages/docker/utils/decorators.py\", line 17, in wrapped\r\n 'Resource ID was not provided'\r\ndocker.errors.NullResource: Resource ID was not provided\r\n```\n", "before_files": [{"content": "from ..errors import InvalidVersion\nfrom ..utils import check_resource, minimum_version\nfrom ..utils import version_lt\nfrom .. import utils\n\n\nclass NetworkApiMixin(object):\n @minimum_version('1.21')\n def networks(self, names=None, ids=None, filters=None):\n \"\"\"\n List networks. 
Similar to the ``docker networks ls`` command.\n\n Args:\n names (:py:class:`list`): List of names to filter by\n ids (:py:class:`list`): List of ids to filter by\n filters (dict): Filters to be processed on the network list.\n Available filters:\n - ``driver=[<driver-name>]`` Matches a network's driver.\n - ``label=[<key>]`` or ``label=[<key>=<value>]``.\n - ``type=[\"custom\"|\"builtin\"]`` Filters networks by type.\n\n Returns:\n (dict): List of network objects.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n\n if filters is None:\n filters = {}\n if names:\n filters['name'] = names\n if ids:\n filters['id'] = ids\n params = {'filters': utils.convert_filters(filters)}\n url = self._url(\"/networks\")\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @minimum_version('1.21')\n def create_network(self, name, driver=None, options=None, ipam=None,\n check_duplicate=None, internal=False, labels=None,\n enable_ipv6=False, attachable=None, scope=None,\n ingress=None):\n \"\"\"\n Create a network. Similar to the ``docker network create``.\n\n Args:\n name (str): Name of the network\n driver (str): Name of the driver used to create the network\n options (dict): Driver options as a key-value dictionary\n ipam (IPAMConfig): Optional custom IP scheme for the network.\n check_duplicate (bool): Request daemon to check for networks with\n same name. Default: ``True``.\n internal (bool): Restrict external access to the network. Default\n ``False``.\n labels (dict): Map of labels to set on the network. Default\n ``None``.\n enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.\n attachable (bool): If enabled, and the network is in the global\n scope, non-service containers on worker nodes will be able to\n connect to the network.\n ingress (bool): If set, create an ingress network which provides\n the routing-mesh in swarm mode.\n\n Returns:\n (dict): The created network reference object\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n\n Example:\n A network using the bridge driver:\n\n >>> client.create_network(\"network1\", driver=\"bridge\")\n\n You can also create more advanced networks with custom IPAM\n configurations. For example, setting the subnet to\n ``192.168.52.0/24`` and gateway address to ``192.168.52.254``.\n\n .. 
code-block:: python\n\n >>> ipam_pool = docker.types.IPAMPool(\n subnet='192.168.52.0/24',\n gateway='192.168.52.254'\n )\n >>> ipam_config = docker.types.IPAMConfig(\n pool_configs=[ipam_pool]\n )\n >>> docker_client.create_network(\"network1\", driver=\"bridge\",\n ipam=ipam_config)\n \"\"\"\n if options is not None and not isinstance(options, dict):\n raise TypeError('options must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'Options': options,\n 'IPAM': ipam,\n 'CheckDuplicate': check_duplicate,\n }\n\n if labels is not None:\n if version_lt(self._version, '1.23'):\n raise InvalidVersion(\n 'network labels were introduced in API 1.23'\n )\n if not isinstance(labels, dict):\n raise TypeError('labels must be a dictionary')\n data[\"Labels\"] = labels\n\n if enable_ipv6:\n if version_lt(self._version, '1.23'):\n raise InvalidVersion(\n 'enable_ipv6 was introduced in API 1.23'\n )\n data['EnableIPv6'] = True\n\n if internal:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion('Internal networks are not '\n 'supported in API version < 1.22')\n data['Internal'] = True\n\n if attachable is not None:\n if version_lt(self._version, '1.24'):\n raise InvalidVersion(\n 'attachable is not supported in API version < 1.24'\n )\n data['Attachable'] = attachable\n\n if ingress is not None:\n if version_lt(self._version, '1.29'):\n raise InvalidVersion(\n 'ingress is not supported in API version < 1.29'\n )\n\n data['Ingress'] = ingress\n\n url = self._url(\"/networks/create\")\n res = self._post_json(url, data=data)\n return self._result(res, json=True)\n\n @minimum_version('1.25')\n def prune_networks(self, filters=None):\n \"\"\"\n Delete unused networks\n\n Args:\n filters (dict): Filters to process on the prune list.\n\n Returns:\n (dict): A dict containing a list of deleted network names and\n the amount of disk space reclaimed in bytes.\n\n Raises:\n :py:class:`docker.errors.APIError`\n If the server returns an error.\n \"\"\"\n params = {}\n if filters:\n params['filters'] = utils.convert_filters(filters)\n url = self._url('/networks/prune')\n return self._result(self._post(url, params=params), True)\n\n @minimum_version('1.21')\n @check_resource('net_id')\n def remove_network(self, net_id):\n \"\"\"\n Remove a network. 
Similar to the ``docker network rm`` command.\n\n Args:\n net_id (str): The network's id\n \"\"\"\n url = self._url(\"/networks/{0}\", net_id)\n res = self._delete(url)\n self._raise_for_status(res)\n\n @minimum_version('1.21')\n @check_resource('net_id')\n def inspect_network(self, net_id, verbose=None):\n \"\"\"\n Get detailed information about a network.\n\n Args:\n net_id (str): ID of network\n verbose (bool): Show the service details across the cluster in\n swarm mode.\n \"\"\"\n params = {}\n if verbose is not None:\n if version_lt(self._version, '1.28'):\n raise InvalidVersion('verbose was introduced in API 1.28')\n params['verbose'] = verbose\n\n url = self._url(\"/networks/{0}\", net_id)\n res = self._get(url, params=params)\n return self._result(res, json=True)\n\n @check_resource('image')\n @minimum_version('1.21')\n def connect_container_to_network(self, container, net_id,\n ipv4_address=None, ipv6_address=None,\n aliases=None, links=None,\n link_local_ips=None):\n \"\"\"\n Connect a container to a network.\n\n Args:\n container (str): container-id/name to be connected to the network\n net_id (str): network id\n aliases (:py:class:`list`): A list of aliases for this endpoint.\n Names in that list can be used within the network to reach the\n container. Defaults to ``None``.\n links (:py:class:`list`): A list of links for this endpoint.\n Containers declared in this list will be linked to this\n container. Defaults to ``None``.\n ipv4_address (str): The IP address of this container on the\n network, using the IPv4 protocol. Defaults to ``None``.\n ipv6_address (str): The IP address of this container on the\n network, using the IPv6 protocol. Defaults to ``None``.\n link_local_ips (:py:class:`list`): A list of link-local\n (IPv4/IPv6) addresses.\n \"\"\"\n data = {\n \"Container\": container,\n \"EndpointConfig\": self.create_endpoint_config(\n aliases=aliases, links=links, ipv4_address=ipv4_address,\n ipv6_address=ipv6_address, link_local_ips=link_local_ips\n ),\n }\n\n url = self._url(\"/networks/{0}/connect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n\n @check_resource('image')\n @minimum_version('1.21')\n def disconnect_container_from_network(self, container, net_id,\n force=False):\n \"\"\"\n Disconnect a container from a network.\n\n Args:\n container (str): container ID or name to be disconnected from the\n network\n net_id (str): network ID\n force (bool): Force the container to disconnect from a network.\n Default: ``False``\n \"\"\"\n data = {\"Container\": container}\n if force:\n if version_lt(self._version, '1.22'):\n raise InvalidVersion(\n 'Forced disconnect was introduced in API 1.22'\n )\n data['Force'] = force\n url = self._url(\"/networks/{0}/disconnect\", net_id)\n res = self._post_json(url, data=data)\n self._raise_for_status(res)\n", "path": "docker/api/network.py"}]}
3,903
192
gh_patches_debug_8636
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-2257
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove werkzeug from Flask instrumentation dependencies </issue> <code> [start of opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 # DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES. 16 # RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE. 17 18 libraries = [ 19 { 20 "library": "aio_pika >= 7.2.0, < 10.0.0", 21 "instrumentation": "opentelemetry-instrumentation-aio-pika==0.44b0.dev", 22 }, 23 { 24 "library": "aiohttp ~= 3.0", 25 "instrumentation": "opentelemetry-instrumentation-aiohttp-client==0.44b0.dev", 26 }, 27 { 28 "library": "aiohttp ~= 3.0", 29 "instrumentation": "opentelemetry-instrumentation-aiohttp-server==0.44b0.dev", 30 }, 31 { 32 "library": "aiopg >= 0.13.0, < 2.0.0", 33 "instrumentation": "opentelemetry-instrumentation-aiopg==0.44b0.dev", 34 }, 35 { 36 "library": "asgiref ~= 3.0", 37 "instrumentation": "opentelemetry-instrumentation-asgi==0.44b0.dev", 38 }, 39 { 40 "library": "asyncpg >= 0.12.0", 41 "instrumentation": "opentelemetry-instrumentation-asyncpg==0.44b0.dev", 42 }, 43 { 44 "library": "boto~=2.0", 45 "instrumentation": "opentelemetry-instrumentation-boto==0.44b0.dev", 46 }, 47 { 48 "library": "boto3 ~= 1.0", 49 "instrumentation": "opentelemetry-instrumentation-boto3sqs==0.44b0.dev", 50 }, 51 { 52 "library": "botocore ~= 1.0", 53 "instrumentation": "opentelemetry-instrumentation-botocore==0.44b0.dev", 54 }, 55 { 56 "library": "cassandra-driver ~= 3.25", 57 "instrumentation": "opentelemetry-instrumentation-cassandra==0.44b0.dev", 58 }, 59 { 60 "library": "scylla-driver ~= 3.25", 61 "instrumentation": "opentelemetry-instrumentation-cassandra==0.44b0.dev", 62 }, 63 { 64 "library": "celery >= 4.0, < 6.0", 65 "instrumentation": "opentelemetry-instrumentation-celery==0.44b0.dev", 66 }, 67 { 68 "library": "confluent-kafka >= 1.8.2, <= 2.3.0", 69 "instrumentation": "opentelemetry-instrumentation-confluent-kafka==0.44b0.dev", 70 }, 71 { 72 "library": "django >= 1.10", 73 "instrumentation": "opentelemetry-instrumentation-django==0.44b0.dev", 74 }, 75 { 76 "library": "elasticsearch >= 2.0", 77 "instrumentation": "opentelemetry-instrumentation-elasticsearch==0.44b0.dev", 78 }, 79 { 80 "library": "falcon >= 1.4.1, < 3.1.2", 81 "instrumentation": "opentelemetry-instrumentation-falcon==0.44b0.dev", 82 }, 83 { 84 "library": "fastapi ~= 0.58", 85 "instrumentation": "opentelemetry-instrumentation-fastapi==0.44b0.dev", 86 }, 87 { 88 "library": "werkzeug < 3.0.0", 89 "instrumentation": "opentelemetry-instrumentation-flask==0.44b0.dev", 90 }, 91 { 92 "library": "flask >= 1.0", 93 "instrumentation": "opentelemetry-instrumentation-flask==0.44b0.dev", 94 }, 95 { 96 "library": "grpcio ~= 1.27", 97 "instrumentation": "opentelemetry-instrumentation-grpc==0.44b0.dev", 98 }, 99 { 100 "library": "httpx >= 
0.18.0", 101 "instrumentation": "opentelemetry-instrumentation-httpx==0.44b0.dev", 102 }, 103 { 104 "library": "jinja2 >= 2.7, < 4.0", 105 "instrumentation": "opentelemetry-instrumentation-jinja2==0.44b0.dev", 106 }, 107 { 108 "library": "kafka-python >= 2.0", 109 "instrumentation": "opentelemetry-instrumentation-kafka-python==0.44b0.dev", 110 }, 111 { 112 "library": "mysql-connector-python ~= 8.0", 113 "instrumentation": "opentelemetry-instrumentation-mysql==0.44b0.dev", 114 }, 115 { 116 "library": "mysqlclient < 3", 117 "instrumentation": "opentelemetry-instrumentation-mysqlclient==0.44b0.dev", 118 }, 119 { 120 "library": "pika >= 0.12.0", 121 "instrumentation": "opentelemetry-instrumentation-pika==0.44b0.dev", 122 }, 123 { 124 "library": "psycopg2 >= 2.7.3.1", 125 "instrumentation": "opentelemetry-instrumentation-psycopg2==0.44b0.dev", 126 }, 127 { 128 "library": "pymemcache >= 1.3.5, < 5", 129 "instrumentation": "opentelemetry-instrumentation-pymemcache==0.44b0.dev", 130 }, 131 { 132 "library": "pymongo >= 3.1, < 5.0", 133 "instrumentation": "opentelemetry-instrumentation-pymongo==0.44b0.dev", 134 }, 135 { 136 "library": "PyMySQL < 2", 137 "instrumentation": "opentelemetry-instrumentation-pymysql==0.44b0.dev", 138 }, 139 { 140 "library": "pyramid >= 1.7", 141 "instrumentation": "opentelemetry-instrumentation-pyramid==0.44b0.dev", 142 }, 143 { 144 "library": "redis >= 2.6", 145 "instrumentation": "opentelemetry-instrumentation-redis==0.44b0.dev", 146 }, 147 { 148 "library": "remoulade >= 0.50", 149 "instrumentation": "opentelemetry-instrumentation-remoulade==0.44b0.dev", 150 }, 151 { 152 "library": "requests ~= 2.0", 153 "instrumentation": "opentelemetry-instrumentation-requests==0.44b0.dev", 154 }, 155 { 156 "library": "scikit-learn ~= 0.24.0", 157 "instrumentation": "opentelemetry-instrumentation-sklearn==0.44b0.dev", 158 }, 159 { 160 "library": "sqlalchemy", 161 "instrumentation": "opentelemetry-instrumentation-sqlalchemy==0.44b0.dev", 162 }, 163 { 164 "library": "starlette ~= 0.13.0", 165 "instrumentation": "opentelemetry-instrumentation-starlette==0.44b0.dev", 166 }, 167 { 168 "library": "psutil >= 5", 169 "instrumentation": "opentelemetry-instrumentation-system-metrics==0.44b0.dev", 170 }, 171 { 172 "library": "tornado >= 5.1.1", 173 "instrumentation": "opentelemetry-instrumentation-tornado==0.44b0.dev", 174 }, 175 { 176 "library": "tortoise-orm >= 0.17.0", 177 "instrumentation": "opentelemetry-instrumentation-tortoiseorm==0.44b0.dev", 178 }, 179 { 180 "library": "pydantic >= 1.10.2", 181 "instrumentation": "opentelemetry-instrumentation-tortoiseorm==0.44b0.dev", 182 }, 183 { 184 "library": "urllib3 >= 1.0.0, < 3.0.0", 185 "instrumentation": "opentelemetry-instrumentation-urllib3==0.44b0.dev", 186 }, 187 ] 188 default_instrumentations = [ 189 "opentelemetry-instrumentation-asyncio==0.44b0.dev", 190 "opentelemetry-instrumentation-aws-lambda==0.44b0.dev", 191 "opentelemetry-instrumentation-dbapi==0.44b0.dev", 192 "opentelemetry-instrumentation-logging==0.44b0.dev", 193 "opentelemetry-instrumentation-sqlite3==0.44b0.dev", 194 "opentelemetry-instrumentation-urllib==0.44b0.dev", 195 "opentelemetry-instrumentation-wsgi==0.44b0.dev", 196 ] 197 [end of opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py @@ -84,10 +84,6 @@ "library": "fastapi ~= 0.58", "instrumentation": "opentelemetry-instrumentation-fastapi==0.44b0.dev", }, - { - "library": "werkzeug < 3.0.0", - "instrumentation": "opentelemetry-instrumentation-flask==0.44b0.dev", - }, { "library": "flask >= 1.0", "instrumentation": "opentelemetry-instrumentation-flask==0.44b0.dev",
{"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py\n@@ -84,10 +84,6 @@\n \"library\": \"fastapi ~= 0.58\",\n \"instrumentation\": \"opentelemetry-instrumentation-fastapi==0.44b0.dev\",\n },\n- {\n- \"library\": \"werkzeug < 3.0.0\",\n- \"instrumentation\": \"opentelemetry-instrumentation-flask==0.44b0.dev\",\n- },\n {\n \"library\": \"flask >= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-flask==0.44b0.dev\",\n", "issue": "Remove werkzeug from Flask instrumentation dependencies\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# DO NOT EDIT. THIS FILE WAS AUTOGENERATED FROM INSTRUMENTATION PACKAGES.\n# RUN `python scripts/generate_instrumentation_bootstrap.py` TO REGENERATE.\n\nlibraries = [\n {\n \"library\": \"aio_pika >= 7.2.0, < 10.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aio-pika==0.44b0.dev\",\n },\n {\n \"library\": \"aiohttp ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiohttp-client==0.44b0.dev\",\n },\n {\n \"library\": \"aiohttp ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiohttp-server==0.44b0.dev\",\n },\n {\n \"library\": \"aiopg >= 0.13.0, < 2.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-aiopg==0.44b0.dev\",\n },\n {\n \"library\": \"asgiref ~= 3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asgi==0.44b0.dev\",\n },\n {\n \"library\": \"asyncpg >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-asyncpg==0.44b0.dev\",\n },\n {\n \"library\": \"boto~=2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-boto==0.44b0.dev\",\n },\n {\n \"library\": \"boto3 ~= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-boto3sqs==0.44b0.dev\",\n },\n {\n \"library\": \"botocore ~= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-botocore==0.44b0.dev\",\n },\n {\n \"library\": \"cassandra-driver ~= 3.25\",\n \"instrumentation\": \"opentelemetry-instrumentation-cassandra==0.44b0.dev\",\n },\n {\n \"library\": \"scylla-driver ~= 3.25\",\n \"instrumentation\": \"opentelemetry-instrumentation-cassandra==0.44b0.dev\",\n },\n {\n \"library\": \"celery >= 4.0, < 6.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-celery==0.44b0.dev\",\n },\n {\n \"library\": \"confluent-kafka >= 1.8.2, <= 2.3.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-confluent-kafka==0.44b0.dev\",\n },\n {\n \"library\": \"django >= 1.10\",\n \"instrumentation\": \"opentelemetry-instrumentation-django==0.44b0.dev\",\n },\n {\n \"library\": \"elasticsearch >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-elasticsearch==0.44b0.dev\",\n },\n {\n 
\"library\": \"falcon >= 1.4.1, < 3.1.2\",\n \"instrumentation\": \"opentelemetry-instrumentation-falcon==0.44b0.dev\",\n },\n {\n \"library\": \"fastapi ~= 0.58\",\n \"instrumentation\": \"opentelemetry-instrumentation-fastapi==0.44b0.dev\",\n },\n {\n \"library\": \"werkzeug < 3.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-flask==0.44b0.dev\",\n },\n {\n \"library\": \"flask >= 1.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-flask==0.44b0.dev\",\n },\n {\n \"library\": \"grpcio ~= 1.27\",\n \"instrumentation\": \"opentelemetry-instrumentation-grpc==0.44b0.dev\",\n },\n {\n \"library\": \"httpx >= 0.18.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-httpx==0.44b0.dev\",\n },\n {\n \"library\": \"jinja2 >= 2.7, < 4.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-jinja2==0.44b0.dev\",\n },\n {\n \"library\": \"kafka-python >= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-kafka-python==0.44b0.dev\",\n },\n {\n \"library\": \"mysql-connector-python ~= 8.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-mysql==0.44b0.dev\",\n },\n {\n \"library\": \"mysqlclient < 3\",\n \"instrumentation\": \"opentelemetry-instrumentation-mysqlclient==0.44b0.dev\",\n },\n {\n \"library\": \"pika >= 0.12.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pika==0.44b0.dev\",\n },\n {\n \"library\": \"psycopg2 >= 2.7.3.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-psycopg2==0.44b0.dev\",\n },\n {\n \"library\": \"pymemcache >= 1.3.5, < 5\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymemcache==0.44b0.dev\",\n },\n {\n \"library\": \"pymongo >= 3.1, < 5.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymongo==0.44b0.dev\",\n },\n {\n \"library\": \"PyMySQL < 2\",\n \"instrumentation\": \"opentelemetry-instrumentation-pymysql==0.44b0.dev\",\n },\n {\n \"library\": \"pyramid >= 1.7\",\n \"instrumentation\": \"opentelemetry-instrumentation-pyramid==0.44b0.dev\",\n },\n {\n \"library\": \"redis >= 2.6\",\n \"instrumentation\": \"opentelemetry-instrumentation-redis==0.44b0.dev\",\n },\n {\n \"library\": \"remoulade >= 0.50\",\n \"instrumentation\": \"opentelemetry-instrumentation-remoulade==0.44b0.dev\",\n },\n {\n \"library\": \"requests ~= 2.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-requests==0.44b0.dev\",\n },\n {\n \"library\": \"scikit-learn ~= 0.24.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-sklearn==0.44b0.dev\",\n },\n {\n \"library\": \"sqlalchemy\",\n \"instrumentation\": \"opentelemetry-instrumentation-sqlalchemy==0.44b0.dev\",\n },\n {\n \"library\": \"starlette ~= 0.13.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-starlette==0.44b0.dev\",\n },\n {\n \"library\": \"psutil >= 5\",\n \"instrumentation\": \"opentelemetry-instrumentation-system-metrics==0.44b0.dev\",\n },\n {\n \"library\": \"tornado >= 5.1.1\",\n \"instrumentation\": \"opentelemetry-instrumentation-tornado==0.44b0.dev\",\n },\n {\n \"library\": \"tortoise-orm >= 0.17.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-tortoiseorm==0.44b0.dev\",\n },\n {\n \"library\": \"pydantic >= 1.10.2\",\n \"instrumentation\": \"opentelemetry-instrumentation-tortoiseorm==0.44b0.dev\",\n },\n {\n \"library\": \"urllib3 >= 1.0.0, < 3.0.0\",\n \"instrumentation\": \"opentelemetry-instrumentation-urllib3==0.44b0.dev\",\n },\n]\ndefault_instrumentations = [\n \"opentelemetry-instrumentation-asyncio==0.44b0.dev\",\n \"opentelemetry-instrumentation-aws-lambda==0.44b0.dev\",\n 
\"opentelemetry-instrumentation-dbapi==0.44b0.dev\",\n \"opentelemetry-instrumentation-logging==0.44b0.dev\",\n \"opentelemetry-instrumentation-sqlite3==0.44b0.dev\",\n \"opentelemetry-instrumentation-urllib==0.44b0.dev\",\n \"opentelemetry-instrumentation-wsgi==0.44b0.dev\",\n]\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/bootstrap_gen.py"}]}
3,208
212
gh_patches_debug_64230
rasdani/github-patches
git_diff
optuna__optuna-56
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Incompatibility with old versions of SQLAlchemy. Connecting to PostgreSQL fails with old versions of SQLAlchemy raising an error: `sqlalchemy.exc.CompileError: Postgresql ENUM type requires a name`. This error is resolved once sqlalchemy version is updated. For example: ```python >>> import sqlalchemy >>> sqlalchemy.__version__ '1.0.13' >>> from pfnopt.storages import RDBStorage >>> RDBStorage(url='postgresql://pfnopt:somepassword@localhost:5432/some_db') Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/sano/PycharmProjects/pfnopt/pfnopt/storages/rdb.py", line 85, in __init__ Base.metadata.create_all(self.engine) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/schema.py", line 3695, in create_all tables=tables) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py", line 1856, in _run_visitor conn._run_visitor(visitorcallable, element, **kwargs) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py", line 1481, in _run_visitor **kwargs).traverse_single(element) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/visitors.py", line 121, in traverse_single return meth(obj, **kw) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py", line 720, in visit_metadata _ddl_runner=self) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/event/attr.py", line 256, in __call__ fn(*args, **kw) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/util/langhelpers.py", line 546, in __call__ return getattr(self.target, self.name)(*arg, **kw) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/sqltypes.py", line 1040, in _on_metadata_create t._on_metadata_create(target, bind, **kw) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1379, in _on_metadata_create self.create(bind=bind, checkfirst=checkfirst) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1317, in create bind.execute(CreateEnumType(self)) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py", line 914, in execute return meth(self, multiparams, params) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py", line 68, in _execute_on_connection return connection._execute_ddl(self, multiparams, params) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py", line 962, in _execute_ddl compiled = ddl.compile(dialect=dialect) File "<string>", line 1, in <lambda> File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/elements.py", line 494, in compile return self._compiler(dialect, bind=bind, **kw) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py", line 26, in _compiler return dialect.ddl_compiler(dialect, self, **kw) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py", line 190, in __init__ self.string = self.process(self.statement, **compile_kwargs) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py", 
line 213, in process return obj._compiler_dispatch(self, **kwargs) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/visitors.py", line 81, in _compiler_dispatch return meth(self, **kw) File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1613, in visit_create_enum_type self.preparer.format_type(type_), File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1857, in format_type raise exc.CompileError("Postgresql ENUM type requires a name.") sqlalchemy.exc.CompileError: Postgresql ENUM type requires a name. ``` </issue> <code> [start of setup.py] 1 import os 2 from setuptools import find_packages 3 from setuptools import setup 4 import sys 5 6 7 def get_version(): 8 version_filepath = os.path.join(os.path.dirname(__file__), 'pfnopt', 'version.py') 9 with open(version_filepath) as f: 10 for line in f: 11 if line.startswith('__version__'): 12 return line.strip().split()[-1][1:-1] 13 assert False 14 15 16 tests_require = ['pytest', 'hacking', 'mock'] 17 if sys.version_info[0] == 3: 18 tests_require.append('mypy') 19 20 21 setup( 22 name='pfnopt', 23 version=get_version(), 24 description='', 25 author='Takuya Akiba', 26 author_email='[email protected]', 27 packages=find_packages(), 28 install_requires=['sqlalchemy', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'], 29 tests_require=tests_require, 30 extras_require={'testing': tests_require}, 31 entry_points={ 32 'console_scripts': ['pfnopt = pfnopt.cli:main'], 33 'pfnopt.command': ['mkstudy = pfnopt.cli:MakeStudy'] 34 } 35 ) 36 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -25,7 +25,7 @@ author='Takuya Akiba', author_email='[email protected]', packages=find_packages(), - install_requires=['sqlalchemy', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'], + install_requires=['sqlalchemy>=1.1.0', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'], tests_require=tests_require, extras_require={'testing': tests_require}, entry_points={
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -25,7 +25,7 @@\n author='Takuya Akiba',\n author_email='[email protected]',\n packages=find_packages(),\n- install_requires=['sqlalchemy', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],\n+ install_requires=['sqlalchemy>=1.1.0', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],\n tests_require=tests_require,\n extras_require={'testing': tests_require},\n entry_points={\n", "issue": "Incompatibility with old versions of SQLAlchemy.\nConnecting to PostgreSQL fails with old versions of SQLAlchemy raising an error: `sqlalchemy.exc.CompileError: Postgresql ENUM type requires a name`. This error is resolved once sqlalchemy version is updated.\r\n\r\nFor example:\r\n```python\r\n>>> import sqlalchemy\r\n>>> sqlalchemy.__version__\r\n'1.0.13'\r\n>>> from pfnopt.storages import RDBStorage\r\n>>> RDBStorage(url='postgresql://pfnopt:somepassword@localhost:5432/some_db')\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/Users/sano/PycharmProjects/pfnopt/pfnopt/storages/rdb.py\", line 85, in __init__\r\n Base.metadata.create_all(self.engine)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/schema.py\", line 3695, in create_all\r\n tables=tables)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py\", line 1856, in _run_visitor\r\n conn._run_visitor(visitorcallable, element, **kwargs)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py\", line 1481, in _run_visitor\r\n **kwargs).traverse_single(element)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/visitors.py\", line 121, in traverse_single\r\n return meth(obj, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py\", line 720, in visit_metadata\r\n _ddl_runner=self)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/event/attr.py\", line 256, in __call__\r\n fn(*args, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/util/langhelpers.py\", line 546, in __call__\r\n return getattr(self.target, self.name)(*arg, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/sqltypes.py\", line 1040, in _on_metadata_create\r\n t._on_metadata_create(target, bind, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py\", line 1379, in _on_metadata_create\r\n self.create(bind=bind, checkfirst=checkfirst)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py\", line 1317, in create\r\n bind.execute(CreateEnumType(self))\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py\", line 914, in execute\r\n return meth(self, multiparams, params)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py\", line 68, in _execute_on_connection\r\n return connection._execute_ddl(self, multiparams, params)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py\", line 962, in _execute_ddl\r\n compiled = ddl.compile(dialect=dialect)\r\n File \"<string>\", line 1, in <lambda>\r\n File 
\"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/elements.py\", line 494, in compile\r\n return self._compiler(dialect, bind=bind, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py\", line 26, in _compiler\r\n return dialect.ddl_compiler(dialect, self, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py\", line 190, in __init__\r\n self.string = self.process(self.statement, **compile_kwargs)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py\", line 213, in process\r\n return obj._compiler_dispatch(self, **kwargs)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/visitors.py\", line 81, in _compiler_dispatch\r\n return meth(self, **kw)\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py\", line 1613, in visit_create_enum_type\r\n self.preparer.format_type(type_),\r\n File \"/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py\", line 1857, in format_type\r\n raise exc.CompileError(\"Postgresql ENUM type requires a name.\")\r\nsqlalchemy.exc.CompileError: Postgresql ENUM type requires a name.\r\n```\n", "before_files": [{"content": "import os\nfrom setuptools import find_packages\nfrom setuptools import setup\nimport sys\n\n\ndef get_version():\n version_filepath = os.path.join(os.path.dirname(__file__), 'pfnopt', 'version.py')\n with open(version_filepath) as f:\n for line in f:\n if line.startswith('__version__'):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ntests_require = ['pytest', 'hacking', 'mock']\nif sys.version_info[0] == 3:\n tests_require.append('mypy')\n\n\nsetup(\n name='pfnopt',\n version=get_version(),\n description='',\n author='Takuya Akiba',\n author_email='[email protected]',\n packages=find_packages(),\n install_requires=['sqlalchemy', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],\n tests_require=tests_require,\n extras_require={'testing': tests_require},\n entry_points={\n 'console_scripts': ['pfnopt = pfnopt.cli:main'],\n 'pfnopt.command': ['mkstudy = pfnopt.cli:MakeStudy']\n }\n)\n", "path": "setup.py"}]}
2,142
142
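A short inference on why the pin in the diff above fixes the traceback (the row itself only states that upgrading SQLAlchemy helps): support for deriving a named PostgreSQL ENUM type from a PEP-435 `enum.Enum` class arrived in the SQLAlchemy 1.1 series, so on 1.0.x the storage's state column compiles to an unnamed native ENUM and the dialect refuses to emit `CREATE TYPE`. A standalone sketch of both spellings follows; `TrialState` is an illustrative stand-in, since the row only shows setup.py and not the storage models.

```python
# Hedged sketch, not pfnopt code: TrialState is illustrative only.
import enum

import sqlalchemy as sa


class TrialState(enum.Enum):
    RUNNING = 0
    COMPLETE = 1


# SQLAlchemy >= 1.1 derives the PostgreSQL ENUM type name from the PEP-435
# class (here "trialstate"), so CREATE TYPE can be emitted; on 1.0.x the type
# stays unnamed, which is the "Postgresql ENUM type requires a name." failure.
state = sa.Column("state", sa.Enum(TrialState))

# Version-independent alternative: name the type explicitly.
state_named = sa.Column("state", sa.Enum("RUNNING", "COMPLETE", name="trialstate"))
```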
gh_patches_debug_30082
rasdani/github-patches
git_diff
python-trio__trio-967
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> trio.Path.home raises "AttributeError: _flavour" I get the following on Mac and Linux: ``` >>> import trio >>> trio.Path.home() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/Cellar/python/3.7.2_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/pathlib.py", line 1055, in home return cls(cls()._flavour.gethomedir(None)) File "/Users/zach/.allotropic/python/default/lib/python3.7/site-packages/trio/_path.py", line 157, in __getattr__ raise AttributeError(name) AttributeError: _flavour ``` As the traceback states, ``pathlib.Path`` is instantiating itself to dertermine its ``_flavour``, which is a class to delegate filesystem calls to differing Windows and Posix environments. I think the explanation is that trio.Path does not override ``__new__``, so it doesn't get a chance to turn into a ``PosixPath`` or ``WindowsPath`` object like ``pathlib.Path`` objects do. The ``_flavour`` attribute is only available from these system-specific classes, and the ``AsyncAutoWrapperType`` doesn't explicitly wrap these classes to make it an attribute of ``trio.Path``. One workaround is to override ``home`` in ``trio.Path``: ``` @classmethod def home(cls): return cls(cls()._wraps()._flavour.gethomedir(None)) ``` There are other approaches and I'd like to check in with the team here. Any thoughts? </issue> <code> [start of trio/_path.py] 1 from functools import wraps, partial 2 import os 3 import types 4 import pathlib 5 6 import trio 7 from trio._util import async_wraps, fspath 8 9 __all__ = ['Path'] 10 11 12 # python3.5 compat: __fspath__ does not exist in 3.5, so unwrap any trio.Path 13 # being passed to any wrapped method 14 def unwrap_paths(args): 15 new_args = [] 16 for arg in args: 17 if isinstance(arg, Path): 18 arg = arg._wrapped 19 new_args.append(arg) 20 return new_args 21 22 23 # re-wrap return value from methods that return new instances of pathlib.Path 24 def rewrap_path(value): 25 if isinstance(value, pathlib.Path): 26 value = Path(value) 27 return value 28 29 30 def _forward_factory(cls, attr_name, attr): 31 @wraps(attr) 32 def wrapper(self, *args, **kwargs): 33 args = unwrap_paths(args) 34 attr = getattr(self._wrapped, attr_name) 35 value = attr(*args, **kwargs) 36 return rewrap_path(value) 37 38 return wrapper 39 40 41 def _forward_magic(cls, attr): 42 sentinel = object() 43 44 @wraps(attr) 45 def wrapper(self, other=sentinel): 46 if other is sentinel: 47 return attr(self._wrapped) 48 if isinstance(other, cls): 49 other = other._wrapped 50 value = attr(self._wrapped, other) 51 return rewrap_path(value) 52 53 return wrapper 54 55 56 def thread_wrapper_factory(cls, meth_name): 57 @async_wraps(cls, pathlib.Path, meth_name) 58 async def wrapper(self, *args, **kwargs): 59 args = unwrap_paths(args) 60 meth = getattr(self._wrapped, meth_name) 61 func = partial(meth, *args, **kwargs) 62 value = await trio.run_sync_in_worker_thread(func) 63 return rewrap_path(value) 64 65 return wrapper 66 67 68 class AsyncAutoWrapperType(type): 69 def __init__(cls, name, bases, attrs): 70 super().__init__(name, bases, attrs) 71 72 cls._forward = [] 73 type(cls).generate_forwards(cls, attrs) 74 type(cls).generate_wraps(cls, attrs) 75 type(cls).generate_magic(cls, attrs) 76 77 def generate_forwards(cls, attrs): 78 # forward functions of _forwards 79 for attr_name, attr in cls._forwards.__dict__.items(): 80 if attr_name.startswith('_') or attr_name in attrs: 81 
continue 82 83 if isinstance(attr, property): 84 cls._forward.append(attr_name) 85 elif isinstance(attr, types.FunctionType): 86 wrapper = _forward_factory(cls, attr_name, attr) 87 setattr(cls, attr_name, wrapper) 88 else: 89 raise TypeError(attr_name, type(attr)) 90 91 def generate_wraps(cls, attrs): 92 # generate wrappers for functions of _wraps 93 for attr_name, attr in cls._wraps.__dict__.items(): 94 if attr_name.startswith('_') or attr_name in attrs: 95 continue 96 97 if isinstance(attr, classmethod): 98 setattr(cls, attr_name, attr) 99 elif isinstance(attr, types.FunctionType): 100 wrapper = thread_wrapper_factory(cls, attr_name) 101 setattr(cls, attr_name, wrapper) 102 else: 103 raise TypeError(attr_name, type(attr)) 104 105 def generate_magic(cls, attrs): 106 # generate wrappers for magic 107 for attr_name in cls._forward_magic: 108 attr = getattr(cls._forwards, attr_name) 109 wrapper = _forward_magic(cls, attr) 110 setattr(cls, attr_name, wrapper) 111 112 113 class Path(metaclass=AsyncAutoWrapperType): 114 """A :class:`pathlib.Path` wrapper that executes blocking methods in 115 :meth:`trio.run_sync_in_worker_thread`. 116 117 """ 118 119 _wraps = pathlib.Path 120 _forwards = pathlib.PurePath 121 _forward_magic = [ 122 '__str__', '__bytes__', '__truediv__', '__rtruediv__', '__eq__', 123 '__lt__', '__le__', '__gt__', '__ge__' 124 ] 125 126 def __init__(self, *args): 127 args = unwrap_paths(args) 128 129 self._wrapped = pathlib.Path(*args) 130 131 async def iterdir(self): 132 """ 133 Like :meth:`pathlib.Path.iterdir`, but async. 134 135 This is an async method that returns a synchronous iterator, so you 136 use it like:: 137 138 for subpath in await mypath.iterdir(): 139 ... 140 141 Note that it actually loads the whole directory list into memory 142 immediately, during the initial call. (See `issue #501 143 <https://github.com/python-trio/trio/issues/501>`__ for discussion.) 144 145 """ 146 147 def _load_items(): 148 return list(self._wrapped.iterdir()) 149 150 items = await trio.run_sync_in_worker_thread(_load_items) 151 return (Path(item) for item in items) 152 153 def __getattr__(self, name): 154 if name in self._forward: 155 value = getattr(self._wrapped, name) 156 return rewrap_path(value) 157 raise AttributeError(name) 158 159 def __dir__(self): 160 return super().__dir__() + self._forward 161 162 def __repr__(self): 163 return 'trio.Path({})'.format(repr(str(self))) 164 165 def __fspath__(self): 166 return fspath(self._wrapped) 167 168 @wraps(pathlib.Path.open) 169 async def open(self, *args, **kwargs): 170 """Open the file pointed to by the path, like the :func:`trio.open_file` 171 function does. 172 173 """ 174 175 func = partial(self._wrapped.open, *args, **kwargs) 176 value = await trio.run_sync_in_worker_thread(func) 177 return trio.wrap_file(value) 178 179 180 # The value of Path.absolute.__doc__ makes a reference to 181 # :meth:~pathlib.Path.absolute, which does not exist. Removing this makes more 182 # sense than inventing our own special docstring for this. 183 del Path.absolute.__doc__ 184 185 # python3.5 compat 186 if hasattr(os, 'PathLike'): 187 os.PathLike.register(Path) 188 [end of trio/_path.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/trio/_path.py b/trio/_path.py --- a/trio/_path.py +++ b/trio/_path.py @@ -54,7 +54,7 @@ def thread_wrapper_factory(cls, meth_name): - @async_wraps(cls, pathlib.Path, meth_name) + @async_wraps(cls, cls._wraps, meth_name) async def wrapper(self, *args, **kwargs): args = unwrap_paths(args) meth = getattr(self._wrapped, meth_name) @@ -65,6 +65,19 @@ return wrapper +def classmethod_wrapper_factory(cls, meth_name): + @classmethod + @async_wraps(cls, cls._wraps, meth_name) + async def wrapper(cls, *args, **kwargs): + args = unwrap_paths(args) + meth = getattr(cls._wraps, meth_name) + func = partial(meth, *args, **kwargs) + value = await trio.run_sync_in_worker_thread(func) + return rewrap_path(value) + + return wrapper + + class AsyncAutoWrapperType(type): def __init__(cls, name, bases, attrs): super().__init__(name, bases, attrs) @@ -93,9 +106,9 @@ for attr_name, attr in cls._wraps.__dict__.items(): if attr_name.startswith('_') or attr_name in attrs: continue - if isinstance(attr, classmethod): - setattr(cls, attr_name, attr) + wrapper = classmethod_wrapper_factory(cls, attr_name) + setattr(cls, attr_name, wrapper) elif isinstance(attr, types.FunctionType): wrapper = thread_wrapper_factory(cls, attr_name) setattr(cls, attr_name, wrapper)
{"golden_diff": "diff --git a/trio/_path.py b/trio/_path.py\n--- a/trio/_path.py\n+++ b/trio/_path.py\n@@ -54,7 +54,7 @@\n \n \n def thread_wrapper_factory(cls, meth_name):\n- @async_wraps(cls, pathlib.Path, meth_name)\n+ @async_wraps(cls, cls._wraps, meth_name)\n async def wrapper(self, *args, **kwargs):\n args = unwrap_paths(args)\n meth = getattr(self._wrapped, meth_name)\n@@ -65,6 +65,19 @@\n return wrapper\n \n \n+def classmethod_wrapper_factory(cls, meth_name):\n+ @classmethod\n+ @async_wraps(cls, cls._wraps, meth_name)\n+ async def wrapper(cls, *args, **kwargs):\n+ args = unwrap_paths(args)\n+ meth = getattr(cls._wraps, meth_name)\n+ func = partial(meth, *args, **kwargs)\n+ value = await trio.run_sync_in_worker_thread(func)\n+ return rewrap_path(value)\n+\n+ return wrapper\n+\n+\n class AsyncAutoWrapperType(type):\n def __init__(cls, name, bases, attrs):\n super().__init__(name, bases, attrs)\n@@ -93,9 +106,9 @@\n for attr_name, attr in cls._wraps.__dict__.items():\n if attr_name.startswith('_') or attr_name in attrs:\n continue\n-\n if isinstance(attr, classmethod):\n- setattr(cls, attr_name, attr)\n+ wrapper = classmethod_wrapper_factory(cls, attr_name)\n+ setattr(cls, attr_name, wrapper)\n elif isinstance(attr, types.FunctionType):\n wrapper = thread_wrapper_factory(cls, attr_name)\n setattr(cls, attr_name, wrapper)\n", "issue": "trio.Path.home raises \"AttributeError: _flavour\"\nI get the following on Mac and Linux:\r\n```\r\n>>> import trio\r\n>>> trio.Path.home()\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/usr/local/Cellar/python/3.7.2_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/pathlib.py\", line 1055, in home\r\n return cls(cls()._flavour.gethomedir(None))\r\n File \"/Users/zach/.allotropic/python/default/lib/python3.7/site-packages/trio/_path.py\", line 157, in __getattr__\r\n raise AttributeError(name)\r\nAttributeError: _flavour\r\n```\r\nAs the traceback states, ``pathlib.Path`` is instantiating itself to dertermine its ``_flavour``, which is a class to delegate filesystem calls to differing Windows and Posix environments.\r\n\r\nI think the explanation is that trio.Path does not override ``__new__``, so it doesn't get a chance to turn into a ``PosixPath`` or ``WindowsPath`` object like ``pathlib.Path`` objects do. The ``_flavour`` attribute is only available from these system-specific classes, and the ``AsyncAutoWrapperType`` doesn't explicitly wrap these classes to make it an attribute of ``trio.Path``.\r\n\r\nOne workaround is to override ``home`` in ``trio.Path``:\r\n```\r\n @classmethod\r\n def home(cls):\r\n return cls(cls()._wraps()._flavour.gethomedir(None))\r\n```\r\n\r\nThere are other approaches and I'd like to check in with the team here. 
Any thoughts?\n", "before_files": [{"content": "from functools import wraps, partial\nimport os\nimport types\nimport pathlib\n\nimport trio\nfrom trio._util import async_wraps, fspath\n\n__all__ = ['Path']\n\n\n# python3.5 compat: __fspath__ does not exist in 3.5, so unwrap any trio.Path\n# being passed to any wrapped method\ndef unwrap_paths(args):\n new_args = []\n for arg in args:\n if isinstance(arg, Path):\n arg = arg._wrapped\n new_args.append(arg)\n return new_args\n\n\n# re-wrap return value from methods that return new instances of pathlib.Path\ndef rewrap_path(value):\n if isinstance(value, pathlib.Path):\n value = Path(value)\n return value\n\n\ndef _forward_factory(cls, attr_name, attr):\n @wraps(attr)\n def wrapper(self, *args, **kwargs):\n args = unwrap_paths(args)\n attr = getattr(self._wrapped, attr_name)\n value = attr(*args, **kwargs)\n return rewrap_path(value)\n\n return wrapper\n\n\ndef _forward_magic(cls, attr):\n sentinel = object()\n\n @wraps(attr)\n def wrapper(self, other=sentinel):\n if other is sentinel:\n return attr(self._wrapped)\n if isinstance(other, cls):\n other = other._wrapped\n value = attr(self._wrapped, other)\n return rewrap_path(value)\n\n return wrapper\n\n\ndef thread_wrapper_factory(cls, meth_name):\n @async_wraps(cls, pathlib.Path, meth_name)\n async def wrapper(self, *args, **kwargs):\n args = unwrap_paths(args)\n meth = getattr(self._wrapped, meth_name)\n func = partial(meth, *args, **kwargs)\n value = await trio.run_sync_in_worker_thread(func)\n return rewrap_path(value)\n\n return wrapper\n\n\nclass AsyncAutoWrapperType(type):\n def __init__(cls, name, bases, attrs):\n super().__init__(name, bases, attrs)\n\n cls._forward = []\n type(cls).generate_forwards(cls, attrs)\n type(cls).generate_wraps(cls, attrs)\n type(cls).generate_magic(cls, attrs)\n\n def generate_forwards(cls, attrs):\n # forward functions of _forwards\n for attr_name, attr in cls._forwards.__dict__.items():\n if attr_name.startswith('_') or attr_name in attrs:\n continue\n\n if isinstance(attr, property):\n cls._forward.append(attr_name)\n elif isinstance(attr, types.FunctionType):\n wrapper = _forward_factory(cls, attr_name, attr)\n setattr(cls, attr_name, wrapper)\n else:\n raise TypeError(attr_name, type(attr))\n\n def generate_wraps(cls, attrs):\n # generate wrappers for functions of _wraps\n for attr_name, attr in cls._wraps.__dict__.items():\n if attr_name.startswith('_') or attr_name in attrs:\n continue\n\n if isinstance(attr, classmethod):\n setattr(cls, attr_name, attr)\n elif isinstance(attr, types.FunctionType):\n wrapper = thread_wrapper_factory(cls, attr_name)\n setattr(cls, attr_name, wrapper)\n else:\n raise TypeError(attr_name, type(attr))\n\n def generate_magic(cls, attrs):\n # generate wrappers for magic\n for attr_name in cls._forward_magic:\n attr = getattr(cls._forwards, attr_name)\n wrapper = _forward_magic(cls, attr)\n setattr(cls, attr_name, wrapper)\n\n\nclass Path(metaclass=AsyncAutoWrapperType):\n \"\"\"A :class:`pathlib.Path` wrapper that executes blocking methods in\n :meth:`trio.run_sync_in_worker_thread`.\n\n \"\"\"\n\n _wraps = pathlib.Path\n _forwards = pathlib.PurePath\n _forward_magic = [\n '__str__', '__bytes__', '__truediv__', '__rtruediv__', '__eq__',\n '__lt__', '__le__', '__gt__', '__ge__'\n ]\n\n def __init__(self, *args):\n args = unwrap_paths(args)\n\n self._wrapped = pathlib.Path(*args)\n\n async def iterdir(self):\n \"\"\"\n Like :meth:`pathlib.Path.iterdir`, but async.\n\n This is an async method that returns a synchronous 
iterator, so you\n use it like::\n\n for subpath in await mypath.iterdir():\n ...\n\n Note that it actually loads the whole directory list into memory\n immediately, during the initial call. (See `issue #501\n <https://github.com/python-trio/trio/issues/501>`__ for discussion.)\n\n \"\"\"\n\n def _load_items():\n return list(self._wrapped.iterdir())\n\n items = await trio.run_sync_in_worker_thread(_load_items)\n return (Path(item) for item in items)\n\n def __getattr__(self, name):\n if name in self._forward:\n value = getattr(self._wrapped, name)\n return rewrap_path(value)\n raise AttributeError(name)\n\n def __dir__(self):\n return super().__dir__() + self._forward\n\n def __repr__(self):\n return 'trio.Path({})'.format(repr(str(self)))\n\n def __fspath__(self):\n return fspath(self._wrapped)\n\n @wraps(pathlib.Path.open)\n async def open(self, *args, **kwargs):\n \"\"\"Open the file pointed to by the path, like the :func:`trio.open_file`\n function does.\n\n \"\"\"\n\n func = partial(self._wrapped.open, *args, **kwargs)\n value = await trio.run_sync_in_worker_thread(func)\n return trio.wrap_file(value)\n\n\n# The value of Path.absolute.__doc__ makes a reference to\n# :meth:~pathlib.Path.absolute, which does not exist. Removing this makes more\n# sense than inventing our own special docstring for this.\ndel Path.absolute.__doc__\n\n# python3.5 compat\nif hasattr(os, 'PathLike'):\n os.PathLike.register(Path)\n", "path": "trio/_path.py"}]}
2,651
388
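On the trio row above: `pathlib.Path.home` is a classmethod, and the previous `generate_wraps` copied classmethods onto `trio.Path` unchanged, so `trio.Path.home()` runs pathlib's implementation with `cls` bound to `trio.Path`; its `cls()._flavour` lookup then hits `trio.Path.__getattr__`, which does not forward `_flavour`, hence the AttributeError in the traceback. The golden diff adds a `classmethod_wrapper_factory` that calls the method on the wrapped `pathlib.Path` in a worker thread and re-wraps the result. A condensed, self-contained sketch of that shape (simplified: `pathlib.Path` is referenced directly instead of `cls._wraps`, and the modern `trio.to_thread.run_sync` name is used in place of the `run_sync_in_worker_thread` API the diff targets):

```python
import functools
import pathlib

import trio


def classmethod_wrapper_factory(meth_name):
    sync_meth = getattr(pathlib.Path, meth_name)

    @classmethod
    @functools.wraps(sync_meth)
    async def wrapper(cls, *args, **kwargs):
        # Run the blocking pathlib classmethod off the event loop...
        value = await trio.to_thread.run_sync(functools.partial(sync_meth, *args, **kwargs))
        # ...and hand back a trio.Path so chained calls stay async-aware.
        return trio.Path(value) if isinstance(value, pathlib.Path) else value

    return wrapper


async def main():
    # Current trio releases already ship this fix, so Path.home() awaits cleanly.
    print(await trio.Path.home())


if __name__ == "__main__":
    trio.run(main)
```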
gh_patches_debug_36632
rasdani/github-patches
git_diff
svthalia__concrexit-2974
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Education Exam/Summary count incorrect. ### Describe the bug For some courses it says there are >500 documents. That's clearly not correct. Something seems to be wrong here: https://github.com/svthalia/concrexit/blob/ce784be158c2e26afa9d389d67065db1cb1a716c/website/education/views.py#L48-L58 ### How to reproduce Go to https://thalia.nu/education/courses/ </issue> <code> [start of website/education/views.py] 1 """Views provided by the education package.""" 2 import os 3 from datetime import date, datetime 4 5 from django.contrib.auth.decorators import login_required 6 from django.contrib.messages.views import SuccessMessageMixin 7 from django.core.exceptions import PermissionDenied 8 from django.db.models import Count 9 from django.http import HttpResponse 10 from django.shortcuts import redirect 11 from django.urls import reverse_lazy 12 from django.utils import timezone 13 from django.utils.decorators import method_decorator 14 from django.utils.translation import gettext_lazy as _ 15 from django.views.generic import CreateView, DetailView, ListView, TemplateView 16 17 from members.decorators import membership_required 18 from utils.media.services import get_media_url 19 20 from . import emails 21 from .forms import AddExamForm, AddSummaryForm 22 from .models import Category, Course, Exam, Summary 23 24 25 class CourseIndexView(ListView): 26 """Render an overview of the courses.""" 27 28 queryset = ( 29 Course.objects.filter(until=None) 30 .prefetch_related("categories", "old_courses") 31 .annotate(summary_count=Count("summary")) 32 .annotate(exam_count=Count("exam")) 33 ) 34 template_name = "education/courses.html" 35 36 def get_ordering(self) -> str: 37 return "name" 38 39 def get_context_data(self, **kwargs) -> dict: 40 context = super().get_context_data(**kwargs) 41 context.update( 42 { 43 "courses": ( 44 { 45 "course_code": x.course_code, 46 "name": x.name, 47 "categories": x.categories.all(), 48 "document_count": sum( 49 [ 50 x.summary_count, 51 x.exam_count, 52 ] 53 + [ 54 c.summary_set.filter(accepted=True).count() 55 + c.exam_set.filter(accepted=True).count() 56 for c in x.old_courses.all() 57 ] 58 ), 59 "url": x.get_absolute_url(), 60 } 61 for x in context["object_list"] 62 ), 63 "categories": Category.objects.all(), 64 } 65 ) 66 return context 67 68 69 class CourseDetailView(DetailView): 70 """Render the detail page of one specific course.""" 71 72 model = Course 73 context_object_name = "course" 74 template_name = "education/course.html" 75 76 def get_context_data(self, **kwargs) -> dict: 77 context = super().get_context_data(**kwargs) 78 obj = context["course"] 79 courses = list(obj.old_courses.all()) 80 courses.append(obj) 81 items = {} 82 for course in courses: 83 for summary in course.summary_set.filter(accepted=True): 84 if summary.year not in items: 85 items[summary.year] = { 86 "summaries": [], 87 "exams": [], 88 "legacy": course if course.pk != obj.pk else None, 89 } 90 items[summary.year]["summaries"].append( 91 { 92 "year": summary.year, 93 "name": summary.name, 94 "language": summary.language, 95 "id": summary.id, 96 } 97 ) 98 for exam in course.exam_set.filter(accepted=True): 99 if exam.year not in items: 100 items[exam.year] = { 101 "summaries": [], 102 "exams": [], 103 "legacy": course if course.pk != obj.pk else None, 104 } 105 items[exam.year]["exams"].append( 106 { 107 "type": "exam", 108 "year": exam.year, 109 "name": f"{exam.get_type_display()} 
{exam.name}", 110 "language": exam.language, 111 "id": exam.id, 112 } 113 ) 114 context.update({"items": sorted(items.items(), key=lambda x: x[0])}) 115 return context 116 117 118 @method_decorator(login_required, "dispatch") 119 @method_decorator(membership_required, "dispatch") 120 class ExamDetailView(DetailView): 121 """Fetch and output the specified exam.""" 122 123 model = Exam 124 125 def get(self, request, *args, **kwargs) -> HttpResponse: 126 response = super().get(request, *args, **kwargs) 127 obj = response.context_data["object"] 128 obj.download_count += 1 129 obj.save() 130 131 ext = os.path.splitext(obj.file.name)[1] 132 filename = f"{obj.course.name}-summary{obj.year}{ext}" 133 return redirect(get_media_url(obj.file, filename)) 134 135 136 @method_decorator(login_required, "dispatch") 137 @method_decorator(membership_required, "dispatch") 138 class SummaryDetailView(DetailView): 139 """Fetch and output the specified summary.""" 140 141 model = Summary 142 143 def get(self, request, *args, **kwargs) -> HttpResponse: 144 response = super().get(request, *args, **kwargs) 145 obj = response.context_data["object"] 146 obj.download_count += 1 147 obj.save() 148 149 ext = os.path.splitext(obj.file.name)[1] 150 filename = f"{obj.course.name}-summary{obj.year}{ext}" 151 return redirect(get_media_url(obj.file, filename)) 152 153 154 @method_decorator(login_required, "dispatch") 155 @method_decorator(membership_required, "dispatch") 156 class ExamCreateView(SuccessMessageMixin, CreateView): 157 """Render the form to submit a new exam.""" 158 159 model = Exam 160 form_class = AddExamForm 161 template_name = "education/add_exam.html" 162 success_url = reverse_lazy("education:submit-exam") 163 success_message = _("Exam submitted successfully.") 164 165 def get_initial(self) -> dict: 166 initial = super().get_initial() 167 initial["exam_date"] = date.today() 168 initial["course"] = self.kwargs.get("pk", None) 169 return initial 170 171 def form_valid(self, form) -> HttpResponse: 172 self.object = form.save(commit=False) 173 self.object.uploader = self.request.member 174 self.object.uploader_date = datetime.now() 175 self.object.save() 176 emails.send_document_notification(self.object) 177 return super().form_valid(form) 178 179 180 @method_decorator(login_required, "dispatch") 181 @method_decorator(membership_required, "dispatch") 182 class SummaryCreateView(SuccessMessageMixin, CreateView): 183 """Render the form to submit a new summary.""" 184 185 model = Summary 186 form_class = AddSummaryForm 187 template_name = "education/add_summary.html" 188 success_url = reverse_lazy("education:submit-summary") 189 success_message = _("Summary submitted successfully.") 190 191 def get_initial(self): 192 initial = super().get_initial() 193 initial["author"] = self.request.member.get_full_name() 194 initial["course"] = self.kwargs.get("pk", None) 195 return initial 196 197 def form_valid(self, form) -> HttpResponse: 198 self.object = form.save(commit=False) 199 self.object.uploader = self.request.member 200 self.object.uploader_date = datetime.now() 201 self.object.save() 202 emails.send_document_notification(self.object) 203 return super().form_valid(form) 204 205 206 @method_decorator(login_required, "dispatch") 207 class BookInfoView(TemplateView): 208 """Render a page with information about book sale. 
209 210 Only available to members and to-be members 211 """ 212 213 template_name = "education/books.html" 214 215 def dispatch(self, request, *args, **kwargs) -> HttpResponse: 216 if request.member.has_active_membership() or ( 217 request.member.earliest_membership 218 and request.member.earliest_membership.since > timezone.now().date() 219 ): 220 return super().dispatch(request, *args, **kwargs) 221 raise PermissionDenied 222 [end of website/education/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/education/views.py b/website/education/views.py --- a/website/education/views.py +++ b/website/education/views.py @@ -5,7 +5,6 @@ from django.contrib.auth.decorators import login_required from django.contrib.messages.views import SuccessMessageMixin from django.core.exceptions import PermissionDenied -from django.db.models import Count from django.http import HttpResponse from django.shortcuts import redirect from django.urls import reverse_lazy @@ -25,11 +24,8 @@ class CourseIndexView(ListView): """Render an overview of the courses.""" - queryset = ( - Course.objects.filter(until=None) - .prefetch_related("categories", "old_courses") - .annotate(summary_count=Count("summary")) - .annotate(exam_count=Count("exam")) + queryset = Course.objects.filter(until=None).prefetch_related( + "categories", "old_courses" ) template_name = "education/courses.html" @@ -40,15 +36,15 @@ context = super().get_context_data(**kwargs) context.update( { - "courses": ( + "courses": [ { "course_code": x.course_code, "name": x.name, "categories": x.categories.all(), "document_count": sum( [ - x.summary_count, - x.exam_count, + x.summary_set.filter(accepted=True).count(), + x.exam_set.filter(accepted=True).count(), ] + [ c.summary_set.filter(accepted=True).count() @@ -59,7 +55,7 @@ "url": x.get_absolute_url(), } for x in context["object_list"] - ), + ], "categories": Category.objects.all(), } )
{"golden_diff": "diff --git a/website/education/views.py b/website/education/views.py\n--- a/website/education/views.py\n+++ b/website/education/views.py\n@@ -5,7 +5,6 @@\n from django.contrib.auth.decorators import login_required\n from django.contrib.messages.views import SuccessMessageMixin\n from django.core.exceptions import PermissionDenied\n-from django.db.models import Count\n from django.http import HttpResponse\n from django.shortcuts import redirect\n from django.urls import reverse_lazy\n@@ -25,11 +24,8 @@\n class CourseIndexView(ListView):\n \"\"\"Render an overview of the courses.\"\"\"\n \n- queryset = (\n- Course.objects.filter(until=None)\n- .prefetch_related(\"categories\", \"old_courses\")\n- .annotate(summary_count=Count(\"summary\"))\n- .annotate(exam_count=Count(\"exam\"))\n+ queryset = Course.objects.filter(until=None).prefetch_related(\n+ \"categories\", \"old_courses\"\n )\n template_name = \"education/courses.html\"\n \n@@ -40,15 +36,15 @@\n context = super().get_context_data(**kwargs)\n context.update(\n {\n- \"courses\": (\n+ \"courses\": [\n {\n \"course_code\": x.course_code,\n \"name\": x.name,\n \"categories\": x.categories.all(),\n \"document_count\": sum(\n [\n- x.summary_count,\n- x.exam_count,\n+ x.summary_set.filter(accepted=True).count(),\n+ x.exam_set.filter(accepted=True).count(),\n ]\n + [\n c.summary_set.filter(accepted=True).count()\n@@ -59,7 +55,7 @@\n \"url\": x.get_absolute_url(),\n }\n for x in context[\"object_list\"]\n- ),\n+ ],\n \"categories\": Category.objects.all(),\n }\n )\n", "issue": "Education Exam/Summary count incorrect.\n### Describe the bug\r\nFor some courses it says there are >500 documents. That's clearly not correct. Something seems to be wrong here: https://github.com/svthalia/concrexit/blob/ce784be158c2e26afa9d389d67065db1cb1a716c/website/education/views.py#L48-L58\r\n\r\n### How to reproduce\r\nGo to https://thalia.nu/education/courses/\r\n\n", "before_files": [{"content": "\"\"\"Views provided by the education package.\"\"\"\nimport os\nfrom datetime import date, datetime\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.db.models import Count\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import CreateView, DetailView, ListView, TemplateView\n\nfrom members.decorators import membership_required\nfrom utils.media.services import get_media_url\n\nfrom . 
import emails\nfrom .forms import AddExamForm, AddSummaryForm\nfrom .models import Category, Course, Exam, Summary\n\n\nclass CourseIndexView(ListView):\n \"\"\"Render an overview of the courses.\"\"\"\n\n queryset = (\n Course.objects.filter(until=None)\n .prefetch_related(\"categories\", \"old_courses\")\n .annotate(summary_count=Count(\"summary\"))\n .annotate(exam_count=Count(\"exam\"))\n )\n template_name = \"education/courses.html\"\n\n def get_ordering(self) -> str:\n return \"name\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"courses\": (\n {\n \"course_code\": x.course_code,\n \"name\": x.name,\n \"categories\": x.categories.all(),\n \"document_count\": sum(\n [\n x.summary_count,\n x.exam_count,\n ]\n + [\n c.summary_set.filter(accepted=True).count()\n + c.exam_set.filter(accepted=True).count()\n for c in x.old_courses.all()\n ]\n ),\n \"url\": x.get_absolute_url(),\n }\n for x in context[\"object_list\"]\n ),\n \"categories\": Category.objects.all(),\n }\n )\n return context\n\n\nclass CourseDetailView(DetailView):\n \"\"\"Render the detail page of one specific course.\"\"\"\n\n model = Course\n context_object_name = \"course\"\n template_name = \"education/course.html\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n obj = context[\"course\"]\n courses = list(obj.old_courses.all())\n courses.append(obj)\n items = {}\n for course in courses:\n for summary in course.summary_set.filter(accepted=True):\n if summary.year not in items:\n items[summary.year] = {\n \"summaries\": [],\n \"exams\": [],\n \"legacy\": course if course.pk != obj.pk else None,\n }\n items[summary.year][\"summaries\"].append(\n {\n \"year\": summary.year,\n \"name\": summary.name,\n \"language\": summary.language,\n \"id\": summary.id,\n }\n )\n for exam in course.exam_set.filter(accepted=True):\n if exam.year not in items:\n items[exam.year] = {\n \"summaries\": [],\n \"exams\": [],\n \"legacy\": course if course.pk != obj.pk else None,\n }\n items[exam.year][\"exams\"].append(\n {\n \"type\": \"exam\",\n \"year\": exam.year,\n \"name\": f\"{exam.get_type_display()} {exam.name}\",\n \"language\": exam.language,\n \"id\": exam.id,\n }\n )\n context.update({\"items\": sorted(items.items(), key=lambda x: x[0])})\n return context\n\n\n@method_decorator(login_required, \"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass ExamDetailView(DetailView):\n \"\"\"Fetch and output the specified exam.\"\"\"\n\n model = Exam\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n response = super().get(request, *args, **kwargs)\n obj = response.context_data[\"object\"]\n obj.download_count += 1\n obj.save()\n\n ext = os.path.splitext(obj.file.name)[1]\n filename = f\"{obj.course.name}-summary{obj.year}{ext}\"\n return redirect(get_media_url(obj.file, filename))\n\n\n@method_decorator(login_required, \"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass SummaryDetailView(DetailView):\n \"\"\"Fetch and output the specified summary.\"\"\"\n\n model = Summary\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n response = super().get(request, *args, **kwargs)\n obj = response.context_data[\"object\"]\n obj.download_count += 1\n obj.save()\n\n ext = os.path.splitext(obj.file.name)[1]\n filename = f\"{obj.course.name}-summary{obj.year}{ext}\"\n return redirect(get_media_url(obj.file, filename))\n\n\n@method_decorator(login_required, 
\"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass ExamCreateView(SuccessMessageMixin, CreateView):\n \"\"\"Render the form to submit a new exam.\"\"\"\n\n model = Exam\n form_class = AddExamForm\n template_name = \"education/add_exam.html\"\n success_url = reverse_lazy(\"education:submit-exam\")\n success_message = _(\"Exam submitted successfully.\")\n\n def get_initial(self) -> dict:\n initial = super().get_initial()\n initial[\"exam_date\"] = date.today()\n initial[\"course\"] = self.kwargs.get(\"pk\", None)\n return initial\n\n def form_valid(self, form) -> HttpResponse:\n self.object = form.save(commit=False)\n self.object.uploader = self.request.member\n self.object.uploader_date = datetime.now()\n self.object.save()\n emails.send_document_notification(self.object)\n return super().form_valid(form)\n\n\n@method_decorator(login_required, \"dispatch\")\n@method_decorator(membership_required, \"dispatch\")\nclass SummaryCreateView(SuccessMessageMixin, CreateView):\n \"\"\"Render the form to submit a new summary.\"\"\"\n\n model = Summary\n form_class = AddSummaryForm\n template_name = \"education/add_summary.html\"\n success_url = reverse_lazy(\"education:submit-summary\")\n success_message = _(\"Summary submitted successfully.\")\n\n def get_initial(self):\n initial = super().get_initial()\n initial[\"author\"] = self.request.member.get_full_name()\n initial[\"course\"] = self.kwargs.get(\"pk\", None)\n return initial\n\n def form_valid(self, form) -> HttpResponse:\n self.object = form.save(commit=False)\n self.object.uploader = self.request.member\n self.object.uploader_date = datetime.now()\n self.object.save()\n emails.send_document_notification(self.object)\n return super().form_valid(form)\n\n\n@method_decorator(login_required, \"dispatch\")\nclass BookInfoView(TemplateView):\n \"\"\"Render a page with information about book sale.\n\n Only available to members and to-be members\n \"\"\"\n\n template_name = \"education/books.html\"\n\n def dispatch(self, request, *args, **kwargs) -> HttpResponse:\n if request.member.has_active_membership() or (\n request.member.earliest_membership\n and request.member.earliest_membership.since > timezone.now().date()\n ):\n return super().dispatch(request, *args, **kwargs)\n raise PermissionDenied\n", "path": "website/education/views.py"}]}
2,754
396
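On the concrexit row above, the inflated ">500 documents" figures come from the classic multi-`Count` annotation pitfall: annotating one queryset with `Count("summary")` and `Count("exam")` joins both reverse relations into a single query, so each course's rows fan out to summaries × exams, and neither count was limited to `accepted=True`. The golden diff drops the annotations and counts per object instead. If a single annotated queryset were kept, the usual repair is filtered, distinct counts — a sketch under the assumption that the app's import path is `education.models`, matching the `from .models import ... Course` seen in the row's views module:

```python
from django.db.models import Count, Q

from education.models import Course  # assumed import path, for illustration

# Aggregate-side alternative to the per-object counting in the golden diff:
# distinct=True stops the join fan-out, filter=Q(...) mirrors accepted=True.
# (Documents attached to old_courses would still need the per-course loop
# shown in the row.)
courses = (
    Course.objects.filter(until=None)
    .prefetch_related("categories", "old_courses")
    .annotate(
        summary_count=Count("summary", filter=Q(summary__accepted=True), distinct=True),
        exam_count=Count("exam", filter=Q(exam__accepted=True), distinct=True),
    )
)
```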
gh_patches_debug_17580
rasdani/github-patches
git_diff
pyca__cryptography-3873
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cannot use BLAKE2b or BLAKE2s with HMAC Python 3.6.2 on Ubuntu 17.10 Cryptography and dependencies installed via pip in virtualenv cffi==1.10.0 cryptography==2.0.3 pip==9.0.1 setuptools==36.2.7 Steps to reproduce: ``` In [1]: from cryptography.hazmat.backends import default_backend ...: from cryptography.hazmat.primitives import hashes ...: from cryptography.hazmat.primitives.hmac import HMAC ...: ...: backend = default_backend() ...: In [2]: hmac = HMAC(b'\x00'*32, hashes.SHA256(), backend) # just fine In [3]: hmac = HMAC(b'\x00'*32, hashes.BLAKE2s(digest_size=32), backend) --------------------------------------------------------------------------- UnsupportedAlgorithm Traceback (most recent call last) <ipython-input-3-61f273a52c45> in <module>() ----> 1 hmac = HMAC(b'\x00'*32, hashes.BLAKE2s(digest_size=32), backend) ~/.venvs/master/lib/python3.6/site-packages/cryptography/hazmat/primitives/hmac.py in __init__(self, key, algorithm, backend, ctx) 30 self._key = key 31 if ctx is None: ---> 32 self._ctx = self._backend.create_hmac_ctx(key, self.algorithm) 33 else: 34 self._ctx = ctx ~/.venvs/master/lib/python3.6/site-packages/cryptography/hazmat/backends/openssl/backend.py in create_hmac_ctx(self, key, algorithm) 176 177 def create_hmac_ctx(self, key, algorithm): --> 178 return _HMACContext(self, key, algorithm) 179 180 def _build_openssl_digest_name(self, algorithm): ~/.venvs/master/lib/python3.6/site-packages/cryptography/hazmat/backends/openssl/hmac.py in __init__(self, backend, key, algorithm, ctx) 32 "{0} is not a supported hash on this backend.".format( 33 algorithm.name), ---> 34 _Reasons.UNSUPPORTED_HASH 35 ) 36 res = self._backend._lib.HMAC_Init_ex( UnsupportedAlgorithm: blake2s is not a supported hash on this backend. ``` </issue> <code> [start of src/cryptography/hazmat/backends/openssl/hmac.py] 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 
4 5 from __future__ import absolute_import, division, print_function 6 7 8 from cryptography import utils 9 from cryptography.exceptions import ( 10 InvalidSignature, UnsupportedAlgorithm, _Reasons 11 ) 12 from cryptography.hazmat.primitives import constant_time, hashes, mac 13 14 15 @utils.register_interface(mac.MACContext) 16 @utils.register_interface(hashes.HashContext) 17 class _HMACContext(object): 18 def __init__(self, backend, key, algorithm, ctx=None): 19 self._algorithm = algorithm 20 self._backend = backend 21 22 if ctx is None: 23 ctx = self._backend._lib.Cryptography_HMAC_CTX_new() 24 self._backend.openssl_assert(ctx != self._backend._ffi.NULL) 25 ctx = self._backend._ffi.gc( 26 ctx, self._backend._lib.Cryptography_HMAC_CTX_free 27 ) 28 evp_md = self._backend._lib.EVP_get_digestbyname( 29 algorithm.name.encode('ascii')) 30 if evp_md == self._backend._ffi.NULL: 31 raise UnsupportedAlgorithm( 32 "{0} is not a supported hash on this backend.".format( 33 algorithm.name), 34 _Reasons.UNSUPPORTED_HASH 35 ) 36 res = self._backend._lib.HMAC_Init_ex( 37 ctx, key, len(key), evp_md, self._backend._ffi.NULL 38 ) 39 self._backend.openssl_assert(res != 0) 40 41 self._ctx = ctx 42 self._key = key 43 44 algorithm = utils.read_only_property("_algorithm") 45 46 def copy(self): 47 copied_ctx = self._backend._lib.Cryptography_HMAC_CTX_new() 48 self._backend.openssl_assert(copied_ctx != self._backend._ffi.NULL) 49 copied_ctx = self._backend._ffi.gc( 50 copied_ctx, self._backend._lib.Cryptography_HMAC_CTX_free 51 ) 52 res = self._backend._lib.HMAC_CTX_copy(copied_ctx, self._ctx) 53 self._backend.openssl_assert(res != 0) 54 return _HMACContext( 55 self._backend, self._key, self.algorithm, ctx=copied_ctx 56 ) 57 58 def update(self, data): 59 res = self._backend._lib.HMAC_Update(self._ctx, data, len(data)) 60 self._backend.openssl_assert(res != 0) 61 62 def finalize(self): 63 buf = self._backend._ffi.new("unsigned char[]", 64 self._backend._lib.EVP_MAX_MD_SIZE) 65 outlen = self._backend._ffi.new("unsigned int *") 66 res = self._backend._lib.HMAC_Final(self._ctx, buf, outlen) 67 self._backend.openssl_assert(res != 0) 68 self._backend.openssl_assert(outlen[0] == self.algorithm.digest_size) 69 return self._backend._ffi.buffer(buf)[:outlen[0]] 70 71 def verify(self, signature): 72 digest = self.finalize() 73 if not constant_time.bytes_eq(digest, signature): 74 raise InvalidSignature("Signature did not match digest.") 75 [end of src/cryptography/hazmat/backends/openssl/hmac.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cryptography/hazmat/backends/openssl/hmac.py b/src/cryptography/hazmat/backends/openssl/hmac.py --- a/src/cryptography/hazmat/backends/openssl/hmac.py +++ b/src/cryptography/hazmat/backends/openssl/hmac.py @@ -25,12 +25,11 @@ ctx = self._backend._ffi.gc( ctx, self._backend._lib.Cryptography_HMAC_CTX_free ) - evp_md = self._backend._lib.EVP_get_digestbyname( - algorithm.name.encode('ascii')) + name = self._backend._build_openssl_digest_name(algorithm) + evp_md = self._backend._lib.EVP_get_digestbyname(name) if evp_md == self._backend._ffi.NULL: raise UnsupportedAlgorithm( - "{0} is not a supported hash on this backend.".format( - algorithm.name), + "{0} is not a supported hash on this backend".format(name), _Reasons.UNSUPPORTED_HASH ) res = self._backend._lib.HMAC_Init_ex(
{"golden_diff": "diff --git a/src/cryptography/hazmat/backends/openssl/hmac.py b/src/cryptography/hazmat/backends/openssl/hmac.py\n--- a/src/cryptography/hazmat/backends/openssl/hmac.py\n+++ b/src/cryptography/hazmat/backends/openssl/hmac.py\n@@ -25,12 +25,11 @@\n ctx = self._backend._ffi.gc(\n ctx, self._backend._lib.Cryptography_HMAC_CTX_free\n )\n- evp_md = self._backend._lib.EVP_get_digestbyname(\n- algorithm.name.encode('ascii'))\n+ name = self._backend._build_openssl_digest_name(algorithm)\n+ evp_md = self._backend._lib.EVP_get_digestbyname(name)\n if evp_md == self._backend._ffi.NULL:\n raise UnsupportedAlgorithm(\n- \"{0} is not a supported hash on this backend.\".format(\n- algorithm.name),\n+ \"{0} is not a supported hash on this backend\".format(name),\n _Reasons.UNSUPPORTED_HASH\n )\n res = self._backend._lib.HMAC_Init_ex(\n", "issue": "Cannot use BLAKE2b or BLAKE2s with HMAC\nPython 3.6.2 on Ubuntu 17.10\r\nCryptography and dependencies installed via pip in virtualenv\r\ncffi==1.10.0\r\ncryptography==2.0.3\r\npip==9.0.1\r\nsetuptools==36.2.7\r\n\r\nSteps to reproduce:\r\n```\r\nIn [1]: from cryptography.hazmat.backends import default_backend\r\n ...: from cryptography.hazmat.primitives import hashes\r\n ...: from cryptography.hazmat.primitives.hmac import HMAC\r\n ...: \r\n ...: backend = default_backend()\r\n ...: \r\n\r\nIn [2]: hmac = HMAC(b'\\x00'*32, hashes.SHA256(), backend) # just fine\r\n\r\nIn [3]: hmac = HMAC(b'\\x00'*32, hashes.BLAKE2s(digest_size=32), backend)\r\n---------------------------------------------------------------------------\r\nUnsupportedAlgorithm Traceback (most recent call last)\r\n<ipython-input-3-61f273a52c45> in <module>()\r\n----> 1 hmac = HMAC(b'\\x00'*32, hashes.BLAKE2s(digest_size=32), backend)\r\n\r\n~/.venvs/master/lib/python3.6/site-packages/cryptography/hazmat/primitives/hmac.py in __init__(self, key, algorithm, backend, ctx)\r\n 30 self._key = key\r\n 31 if ctx is None:\r\n---> 32 self._ctx = self._backend.create_hmac_ctx(key, self.algorithm)\r\n 33 else:\r\n 34 self._ctx = ctx\r\n\r\n~/.venvs/master/lib/python3.6/site-packages/cryptography/hazmat/backends/openssl/backend.py in create_hmac_ctx(self, key, algorithm)\r\n 176 \r\n 177 def create_hmac_ctx(self, key, algorithm):\r\n--> 178 return _HMACContext(self, key, algorithm)\r\n 179 \r\n 180 def _build_openssl_digest_name(self, algorithm):\r\n\r\n~/.venvs/master/lib/python3.6/site-packages/cryptography/hazmat/backends/openssl/hmac.py in __init__(self, backend, key, algorithm, ctx)\r\n 32 \"{0} is not a supported hash on this backend.\".format(\r\n 33 algorithm.name),\r\n---> 34 _Reasons.UNSUPPORTED_HASH\r\n 35 )\r\n 36 res = self._backend._lib.HMAC_Init_ex(\r\n\r\nUnsupportedAlgorithm: blake2s is not a supported hash on this backend.\r\n```\r\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n InvalidSignature, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.primitives import constant_time, hashes, mac\n\n\[email protected]_interface(mac.MACContext)\[email protected]_interface(hashes.HashContext)\nclass _HMACContext(object):\n def __init__(self, backend, key, algorithm, ctx=None):\n self._algorithm = algorithm\n self._backend = backend\n\n if ctx is None:\n ctx = self._backend._lib.Cryptography_HMAC_CTX_new()\n self._backend.openssl_assert(ctx != self._backend._ffi.NULL)\n ctx = self._backend._ffi.gc(\n ctx, self._backend._lib.Cryptography_HMAC_CTX_free\n )\n evp_md = self._backend._lib.EVP_get_digestbyname(\n algorithm.name.encode('ascii'))\n if evp_md == self._backend._ffi.NULL:\n raise UnsupportedAlgorithm(\n \"{0} is not a supported hash on this backend.\".format(\n algorithm.name),\n _Reasons.UNSUPPORTED_HASH\n )\n res = self._backend._lib.HMAC_Init_ex(\n ctx, key, len(key), evp_md, self._backend._ffi.NULL\n )\n self._backend.openssl_assert(res != 0)\n\n self._ctx = ctx\n self._key = key\n\n algorithm = utils.read_only_property(\"_algorithm\")\n\n def copy(self):\n copied_ctx = self._backend._lib.Cryptography_HMAC_CTX_new()\n self._backend.openssl_assert(copied_ctx != self._backend._ffi.NULL)\n copied_ctx = self._backend._ffi.gc(\n copied_ctx, self._backend._lib.Cryptography_HMAC_CTX_free\n )\n res = self._backend._lib.HMAC_CTX_copy(copied_ctx, self._ctx)\n self._backend.openssl_assert(res != 0)\n return _HMACContext(\n self._backend, self._key, self.algorithm, ctx=copied_ctx\n )\n\n def update(self, data):\n res = self._backend._lib.HMAC_Update(self._ctx, data, len(data))\n self._backend.openssl_assert(res != 0)\n\n def finalize(self):\n buf = self._backend._ffi.new(\"unsigned char[]\",\n self._backend._lib.EVP_MAX_MD_SIZE)\n outlen = self._backend._ffi.new(\"unsigned int *\")\n res = self._backend._lib.HMAC_Final(self._ctx, buf, outlen)\n self._backend.openssl_assert(res != 0)\n self._backend.openssl_assert(outlen[0] == self.algorithm.digest_size)\n return self._backend._ffi.buffer(buf)[:outlen[0]]\n\n def verify(self, signature):\n digest = self.finalize()\n if not constant_time.bytes_eq(digest, signature):\n raise InvalidSignature(\"Signature did not match digest.\")\n", "path": "src/cryptography/hazmat/backends/openssl/hmac.py"}]}
1958
244
gh_patches_debug_10384
rasdani/github-patches
git_diff
shuup__shuup-1558
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve the way permissionas are managed in admin Currently, use has to use a select2 component with a lot of options and this is super boring, tedious and time consuming. Can we use a list of check boxes instead? </issue> <code> [start of shuup/admin/modules/permission_groups/views/edit.py] 1 # -*- coding: utf-8 -*- 2 # This file is part of Shuup. 3 # 4 # Copyright (c) 2012-2018, Shuup Inc. All rights reserved. 5 # 6 # This source code is licensed under the OSL-3.0 license found in the 7 # LICENSE file in the root directory of this source tree. 8 from __future__ import unicode_literals 9 10 from django import forms 11 from django.contrib.auth import get_user_model 12 from django.contrib.auth.models import Group as PermissionGroup 13 from django.utils.encoding import force_text 14 from django.utils.translation import ugettext_lazy as _ 15 16 from shuup.admin.forms.fields import Select2MultipleField 17 from shuup.admin.module_registry import get_modules 18 from shuup.admin.utils.permissions import get_permission_object_from_string 19 from shuup.admin.utils.views import CreateOrUpdateView 20 21 22 class PermissionGroupForm(forms.ModelForm): 23 class Meta: 24 model = PermissionGroup 25 exclude = ("permissions",) 26 27 def __init__(self, *args, **kwargs): 28 super(PermissionGroupForm, self).__init__(*args, **kwargs) 29 initial_permissions = self._get_initial_permissions() 30 self.fields["name"].help_text = _("The permission group name.") 31 self.fields["modules"] = forms.MultipleChoiceField( 32 choices=sorted(self._get_module_choices()), 33 initial=self._get_enabled_modules(initial_permissions), 34 required=False, 35 label=_("Module Permissions"), 36 help_text=_( 37 "Select the modules that should be accessible by this permission group. " 38 "Modules with the same permissions as selected modules will be added automatically." 39 ) 40 ) 41 initial_members = self._get_initial_members() 42 members_field = Select2MultipleField( 43 model=get_user_model(), 44 initial=[member.pk for member in initial_members], 45 required=False, 46 label=_("Members"), 47 help_text=_( 48 "Set the users that belong to this permission group." 
49 ) 50 ) 51 members_field.widget.choices = [(member.pk, force_text(member)) for member in initial_members] 52 self.fields["members"] = members_field 53 54 def _get_module_choices(self): 55 return set((force_text(m.name), force_text(m.name)) for m in get_modules() if m.name != "_Base_") 56 57 def _get_initial_members(self): 58 if self.instance.pk: 59 return self.instance.user_set.all() 60 else: 61 return [] 62 63 def _get_initial_permissions(self): 64 permissions = set() 65 if self.instance.pk: 66 for perm in self.instance.permissions.all(): 67 name, module, _ = perm.natural_key() 68 permissions.add("%s.%s" % (module, name)) 69 return permissions 70 71 def _get_enabled_modules(self, permissions): 72 if not self.instance.pk: 73 return [] 74 permissions = set(permissions) 75 modules = [] 76 for module in get_modules(): 77 # Ignore modules that haven't configured a name 78 if module.name != "_Base_" and set(module.get_required_permissions()).issubset(permissions): 79 modules.append(force_text(module.name)) 80 return modules 81 82 def _get_required_permissions(self, modules): 83 permissions = set() 84 for module in [m for m in get_modules() if m.name in modules]: 85 permissions.update(set(module.get_required_permissions())) 86 return permissions 87 88 def clean_members(self): 89 members = self.cleaned_data.get("members", []) 90 91 return get_user_model().objects.filter(pk__in=members).all() 92 93 def clean(self): 94 cleaned_data = super(PermissionGroupForm, self).clean() 95 96 permissions = set() 97 modules = cleaned_data.pop("modules", []) 98 required_permissions = self._get_required_permissions(modules) 99 100 for permission in required_permissions: 101 permissions.add(get_permission_object_from_string(permission)) 102 103 cleaned_data["required_permissions"] = permissions 104 105 return cleaned_data 106 107 def save(self): 108 obj = super(PermissionGroupForm, self).save() 109 obj.permissions = set(self.cleaned_data["required_permissions"]) 110 obj.user_set = set(self.cleaned_data["members"]) 111 return obj 112 113 114 class PermissionGroupEditView(CreateOrUpdateView): 115 model = PermissionGroup 116 form_class = PermissionGroupForm 117 template_name = "shuup/admin/permission_groups/edit.jinja" 118 context_object_name = "permission_group" 119 add_form_errors_as_messages = True 120 [end of shuup/admin/modules/permission_groups/views/edit.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shuup/admin/modules/permission_groups/views/edit.py b/shuup/admin/modules/permission_groups/views/edit.py --- a/shuup/admin/modules/permission_groups/views/edit.py +++ b/shuup/admin/modules/permission_groups/views/edit.py @@ -36,7 +36,8 @@ help_text=_( "Select the modules that should be accessible by this permission group. " "Modules with the same permissions as selected modules will be added automatically." - ) + ), + widget=forms.CheckboxSelectMultiple ) initial_members = self._get_initial_members() members_field = Select2MultipleField(
{"golden_diff": "diff --git a/shuup/admin/modules/permission_groups/views/edit.py b/shuup/admin/modules/permission_groups/views/edit.py\n--- a/shuup/admin/modules/permission_groups/views/edit.py\n+++ b/shuup/admin/modules/permission_groups/views/edit.py\n@@ -36,7 +36,8 @@\n help_text=_(\n \"Select the modules that should be accessible by this permission group. \"\n \"Modules with the same permissions as selected modules will be added automatically.\"\n- )\n+ ),\n+ widget=forms.CheckboxSelectMultiple\n )\n initial_members = self._get_initial_members()\n members_field = Select2MultipleField(\n", "issue": "Improve the way permissionas are managed in admin\nCurrently, use has to use a select2 component with a lot of options and this is super boring, tedious and time consuming. Can we use a list of check boxes instead?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\nfrom __future__ import unicode_literals\n\nfrom django import forms\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group as PermissionGroup\nfrom django.utils.encoding import force_text\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom shuup.admin.forms.fields import Select2MultipleField\nfrom shuup.admin.module_registry import get_modules\nfrom shuup.admin.utils.permissions import get_permission_object_from_string\nfrom shuup.admin.utils.views import CreateOrUpdateView\n\n\nclass PermissionGroupForm(forms.ModelForm):\n class Meta:\n model = PermissionGroup\n exclude = (\"permissions\",)\n\n def __init__(self, *args, **kwargs):\n super(PermissionGroupForm, self).__init__(*args, **kwargs)\n initial_permissions = self._get_initial_permissions()\n self.fields[\"name\"].help_text = _(\"The permission group name.\")\n self.fields[\"modules\"] = forms.MultipleChoiceField(\n choices=sorted(self._get_module_choices()),\n initial=self._get_enabled_modules(initial_permissions),\n required=False,\n label=_(\"Module Permissions\"),\n help_text=_(\n \"Select the modules that should be accessible by this permission group. 
\"\n \"Modules with the same permissions as selected modules will be added automatically.\"\n )\n )\n initial_members = self._get_initial_members()\n members_field = Select2MultipleField(\n model=get_user_model(),\n initial=[member.pk for member in initial_members],\n required=False,\n label=_(\"Members\"),\n help_text=_(\n \"Set the users that belong to this permission group.\"\n )\n )\n members_field.widget.choices = [(member.pk, force_text(member)) for member in initial_members]\n self.fields[\"members\"] = members_field\n\n def _get_module_choices(self):\n return set((force_text(m.name), force_text(m.name)) for m in get_modules() if m.name != \"_Base_\")\n\n def _get_initial_members(self):\n if self.instance.pk:\n return self.instance.user_set.all()\n else:\n return []\n\n def _get_initial_permissions(self):\n permissions = set()\n if self.instance.pk:\n for perm in self.instance.permissions.all():\n name, module, _ = perm.natural_key()\n permissions.add(\"%s.%s\" % (module, name))\n return permissions\n\n def _get_enabled_modules(self, permissions):\n if not self.instance.pk:\n return []\n permissions = set(permissions)\n modules = []\n for module in get_modules():\n # Ignore modules that haven't configured a name\n if module.name != \"_Base_\" and set(module.get_required_permissions()).issubset(permissions):\n modules.append(force_text(module.name))\n return modules\n\n def _get_required_permissions(self, modules):\n permissions = set()\n for module in [m for m in get_modules() if m.name in modules]:\n permissions.update(set(module.get_required_permissions()))\n return permissions\n\n def clean_members(self):\n members = self.cleaned_data.get(\"members\", [])\n\n return get_user_model().objects.filter(pk__in=members).all()\n\n def clean(self):\n cleaned_data = super(PermissionGroupForm, self).clean()\n\n permissions = set()\n modules = cleaned_data.pop(\"modules\", [])\n required_permissions = self._get_required_permissions(modules)\n\n for permission in required_permissions:\n permissions.add(get_permission_object_from_string(permission))\n\n cleaned_data[\"required_permissions\"] = permissions\n\n return cleaned_data\n\n def save(self):\n obj = super(PermissionGroupForm, self).save()\n obj.permissions = set(self.cleaned_data[\"required_permissions\"])\n obj.user_set = set(self.cleaned_data[\"members\"])\n return obj\n\n\nclass PermissionGroupEditView(CreateOrUpdateView):\n model = PermissionGroup\n form_class = PermissionGroupForm\n template_name = \"shuup/admin/permission_groups/edit.jinja\"\n context_object_name = \"permission_group\"\n add_form_errors_as_messages = True\n", "path": "shuup/admin/modules/permission_groups/views/edit.py"}]}
1763
139
gh_patches_debug_47834
rasdani/github-patches
git_diff
getredash__redash-3877
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> API Key authentication should take precedence over cookies Currently when you're logged in as a user, you can't use embeds or shared dashboards for which you do not have access -- even if you provide the API key. This is because the current user is being defined by the session cookie and the API key is being ignored. We need to change this behavior so the API key takes precedence. From a quick look at [Flask-Login](https://flask-login.readthedocs.io)'s documentation, it seems that it calls `user_loader` first and only if this fails will try the `requets_loader`. Unless this is configurable, we might need to change `user_loader` to always return `None` and implement our own logic in `request_loader`. </issue> <code> [start of redash/authentication/__init__.py] 1 import hashlib 2 import hmac 3 import logging 4 import time 5 from urlparse import urlsplit, urlunsplit 6 7 from flask import jsonify, redirect, request, url_for 8 from flask_login import LoginManager, login_user, logout_user, user_logged_in 9 from redash import models, settings 10 from redash.authentication import jwt_auth 11 from redash.authentication.org_resolving import current_org 12 from redash.settings.organization import settings as org_settings 13 from redash.tasks import record_event 14 from sqlalchemy.orm.exc import NoResultFound 15 from werkzeug.exceptions import Unauthorized 16 17 login_manager = LoginManager() 18 logger = logging.getLogger('authentication') 19 20 21 def get_login_url(external=False, next="/"): 22 if settings.MULTI_ORG and current_org == None: 23 login_url = '/' 24 elif settings.MULTI_ORG: 25 login_url = url_for('redash.login', org_slug=current_org.slug, next=next, _external=external) 26 else: 27 login_url = url_for('redash.login', next=next, _external=external) 28 29 return login_url 30 31 32 def sign(key, path, expires): 33 if not key: 34 return None 35 36 h = hmac.new(str(key), msg=path, digestmod=hashlib.sha1) 37 h.update(str(expires)) 38 39 return h.hexdigest() 40 41 42 @login_manager.user_loader 43 def load_user(user_id_with_identity): 44 org = current_org._get_current_object() 45 46 try: 47 user_id, _ = user_id_with_identity.split("-") 48 user = models.User.get_by_id_and_org(user_id, org) 49 if user.is_disabled or user.get_id() != user_id_with_identity: 50 return None 51 52 return user 53 except (models.NoResultFound, ValueError, AttributeError): 54 return None 55 56 57 def request_loader(request): 58 user = None 59 if settings.AUTH_TYPE == 'hmac': 60 user = hmac_load_user_from_request(request) 61 elif settings.AUTH_TYPE == 'api_key': 62 user = api_key_load_user_from_request(request) 63 else: 64 logger.warning("Unknown authentication type ({}). 
Using default (HMAC).".format(settings.AUTH_TYPE)) 65 user = hmac_load_user_from_request(request) 66 67 if org_settings['auth_jwt_login_enabled'] and user is None: 68 user = jwt_token_load_user_from_request(request) 69 return user 70 71 72 def hmac_load_user_from_request(request): 73 signature = request.args.get('signature') 74 expires = float(request.args.get('expires') or 0) 75 query_id = request.view_args.get('query_id', None) 76 user_id = request.args.get('user_id', None) 77 78 # TODO: 3600 should be a setting 79 if signature and time.time() < expires <= time.time() + 3600: 80 if user_id: 81 user = models.User.query.get(user_id) 82 calculated_signature = sign(user.api_key, request.path, expires) 83 84 if user.api_key and signature == calculated_signature: 85 return user 86 87 if query_id: 88 query = models.Query.query.filter(models.Query.id == query_id).one() 89 calculated_signature = sign(query.api_key, request.path, expires) 90 91 if query.api_key and signature == calculated_signature: 92 return models.ApiUser(query.api_key, query.org, query.groups.keys(), name="ApiKey: Query {}".format(query.id)) 93 94 return None 95 96 97 def get_user_from_api_key(api_key, query_id): 98 if not api_key: 99 return None 100 101 user = None 102 103 # TODO: once we switch all api key storage into the ApiKey model, this code will be much simplified 104 org = current_org._get_current_object() 105 try: 106 user = models.User.get_by_api_key_and_org(api_key, org) 107 if user.is_disabled: 108 user = None 109 except models.NoResultFound: 110 try: 111 api_key = models.ApiKey.get_by_api_key(api_key) 112 user = models.ApiUser(api_key, api_key.org, []) 113 except models.NoResultFound: 114 if query_id: 115 query = models.Query.get_by_id_and_org(query_id, org) 116 if query and query.api_key == api_key: 117 user = models.ApiUser(api_key, query.org, query.groups.keys(), name="ApiKey: Query {}".format(query.id)) 118 119 return user 120 121 122 def get_api_key_from_request(request): 123 api_key = request.args.get('api_key', None) 124 125 if api_key is not None: 126 return api_key 127 128 if request.headers.get('Authorization'): 129 auth_header = request.headers.get('Authorization') 130 api_key = auth_header.replace('Key ', '', 1) 131 elif request.view_args is not None and request.view_args.get('token'): 132 api_key = request.view_args['token'] 133 134 return api_key 135 136 137 def api_key_load_user_from_request(request): 138 api_key = get_api_key_from_request(request) 139 if request.view_args is not None: 140 query_id = request.view_args.get('query_id', None) 141 user = get_user_from_api_key(api_key, query_id) 142 else: 143 user = None 144 145 return user 146 147 148 def jwt_token_load_user_from_request(request): 149 org = current_org._get_current_object() 150 151 payload = None 152 153 if org_settings['auth_jwt_auth_cookie_name']: 154 jwt_token = request.cookies.get(org_settings['auth_jwt_auth_cookie_name'], None) 155 elif org_settings['auth_jwt_auth_header_name']: 156 jwt_token = request.headers.get(org_settings['auth_jwt_auth_header_name'], None) 157 else: 158 return None 159 160 if jwt_token: 161 payload, token_is_valid = jwt_auth.verify_jwt_token( 162 jwt_token, 163 expected_issuer=org_settings['auth_jwt_auth_issuer'], 164 expected_audience=org_settings['auth_jwt_auth_audience'], 165 algorithms=org_settings['auth_jwt_auth_algorithms'], 166 public_certs_url=org_settings['auth_jwt_auth_public_certs_url'], 167 ) 168 if not token_is_valid: 169 raise Unauthorized('Invalid JWT token') 170 171 if not payload: 172 return 
173 174 try: 175 user = models.User.get_by_email_and_org(payload['email'], org) 176 except models.NoResultFound: 177 user = create_and_login_user(current_org, payload['email'], payload['email']) 178 179 return user 180 181 182 def log_user_logged_in(app, user): 183 event = { 184 'org_id': user.org_id, 185 'user_id': user.id, 186 'action': 'login', 187 'object_type': 'redash', 188 'timestamp': int(time.time()), 189 'user_agent': request.user_agent.string, 190 'ip': request.remote_addr 191 } 192 193 record_event.delay(event) 194 195 196 @login_manager.unauthorized_handler 197 def redirect_to_login(): 198 if request.is_xhr or '/api/' in request.path: 199 response = jsonify({'message': "Couldn't find resource. Please login and try again."}) 200 response.status_code = 404 201 return response 202 203 login_url = get_login_url(next=request.url, external=False) 204 205 return redirect(login_url) 206 207 208 def logout_and_redirect_to_index(): 209 logout_user() 210 211 if settings.MULTI_ORG and current_org == None: 212 index_url = '/' 213 elif settings.MULTI_ORG: 214 index_url = url_for('redash.index', org_slug=current_org.slug, _external=False) 215 else: 216 index_url = url_for('redash.index', _external=False) 217 218 return redirect(index_url) 219 220 221 def init_app(app): 222 from redash.authentication import google_oauth, saml_auth, remote_user_auth, ldap_auth 223 224 login_manager.init_app(app) 225 login_manager.anonymous_user = models.AnonymousUser 226 227 app.register_blueprint(google_oauth.blueprint) 228 app.register_blueprint(saml_auth.blueprint) 229 app.register_blueprint(remote_user_auth.blueprint) 230 app.register_blueprint(ldap_auth.blueprint) 231 232 user_logged_in.connect(log_user_logged_in) 233 login_manager.request_loader(request_loader) 234 235 236 def create_and_login_user(org, name, email, picture=None): 237 try: 238 user_object = models.User.get_by_email_and_org(email, org) 239 if user_object.is_disabled: 240 return None 241 if user_object.is_invitation_pending: 242 user_object.is_invitation_pending = False 243 models.db.session.commit() 244 if user_object.name != name: 245 logger.debug("Updating user name (%r -> %r)", user_object.name, name) 246 user_object.name = name 247 models.db.session.commit() 248 except NoResultFound: 249 logger.debug("Creating user object (%r)", name) 250 user_object = models.User(org=org, name=name, email=email, is_invitation_pending=False, 251 _profile_image_url=picture, group_ids=[org.default_group.id]) 252 models.db.session.add(user_object) 253 models.db.session.commit() 254 255 login_user(user_object, remember=True) 256 257 return user_object 258 259 260 def get_next_path(unsafe_next_path): 261 if not unsafe_next_path: 262 return '' 263 264 # Preventing open redirection attacks 265 parts = list(urlsplit(unsafe_next_path)) 266 parts[0] = '' # clear scheme 267 parts[1] = '' # clear netloc 268 safe_next_path = urlunsplit(parts) 269 270 return safe_next_path 271 [end of redash/authentication/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redash/authentication/__init__.py b/redash/authentication/__init__.py --- a/redash/authentication/__init__.py +++ b/redash/authentication/__init__.py @@ -41,6 +41,10 @@ @login_manager.user_loader def load_user(user_id_with_identity): + user = api_key_load_user_from_request(request) + if user: + return user + org = current_org._get_current_object() try:
{"golden_diff": "diff --git a/redash/authentication/__init__.py b/redash/authentication/__init__.py\n--- a/redash/authentication/__init__.py\n+++ b/redash/authentication/__init__.py\n@@ -41,6 +41,10 @@\n \n @login_manager.user_loader\n def load_user(user_id_with_identity):\n+ user = api_key_load_user_from_request(request)\n+ if user:\n+ return user\n+\n org = current_org._get_current_object()\n \n try:\n", "issue": "API Key authentication should take precedence over cookies\nCurrently when you're logged in as a user, you can't use embeds or shared dashboards for which you do not have access -- even if you provide the API key. This is because the current user is being defined by the session cookie and the API key is being ignored.\r\n\r\nWe need to change this behavior so the API key takes precedence. From a quick look at [Flask-Login](https://flask-login.readthedocs.io)'s documentation, it seems that it calls `user_loader` first and only if this fails will try the `requets_loader`.\r\n\r\nUnless this is configurable, we might need to change `user_loader` to always return `None` and implement our own logic in `request_loader`.\n", "before_files": [{"content": "import hashlib\nimport hmac\nimport logging\nimport time\nfrom urlparse import urlsplit, urlunsplit\n\nfrom flask import jsonify, redirect, request, url_for\nfrom flask_login import LoginManager, login_user, logout_user, user_logged_in\nfrom redash import models, settings\nfrom redash.authentication import jwt_auth\nfrom redash.authentication.org_resolving import current_org\nfrom redash.settings.organization import settings as org_settings\nfrom redash.tasks import record_event\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom werkzeug.exceptions import Unauthorized\n\nlogin_manager = LoginManager()\nlogger = logging.getLogger('authentication')\n\n\ndef get_login_url(external=False, next=\"/\"):\n if settings.MULTI_ORG and current_org == None:\n login_url = '/'\n elif settings.MULTI_ORG:\n login_url = url_for('redash.login', org_slug=current_org.slug, next=next, _external=external)\n else:\n login_url = url_for('redash.login', next=next, _external=external)\n\n return login_url\n\n\ndef sign(key, path, expires):\n if not key:\n return None\n\n h = hmac.new(str(key), msg=path, digestmod=hashlib.sha1)\n h.update(str(expires))\n\n return h.hexdigest()\n\n\n@login_manager.user_loader\ndef load_user(user_id_with_identity):\n org = current_org._get_current_object()\n\n try:\n user_id, _ = user_id_with_identity.split(\"-\")\n user = models.User.get_by_id_and_org(user_id, org)\n if user.is_disabled or user.get_id() != user_id_with_identity:\n return None\n\n return user\n except (models.NoResultFound, ValueError, AttributeError):\n return None\n\n\ndef request_loader(request):\n user = None\n if settings.AUTH_TYPE == 'hmac':\n user = hmac_load_user_from_request(request)\n elif settings.AUTH_TYPE == 'api_key':\n user = api_key_load_user_from_request(request)\n else:\n logger.warning(\"Unknown authentication type ({}). 
Using default (HMAC).\".format(settings.AUTH_TYPE))\n user = hmac_load_user_from_request(request)\n\n if org_settings['auth_jwt_login_enabled'] and user is None:\n user = jwt_token_load_user_from_request(request)\n return user\n\n\ndef hmac_load_user_from_request(request):\n signature = request.args.get('signature')\n expires = float(request.args.get('expires') or 0)\n query_id = request.view_args.get('query_id', None)\n user_id = request.args.get('user_id', None)\n\n # TODO: 3600 should be a setting\n if signature and time.time() < expires <= time.time() + 3600:\n if user_id:\n user = models.User.query.get(user_id)\n calculated_signature = sign(user.api_key, request.path, expires)\n\n if user.api_key and signature == calculated_signature:\n return user\n\n if query_id:\n query = models.Query.query.filter(models.Query.id == query_id).one()\n calculated_signature = sign(query.api_key, request.path, expires)\n\n if query.api_key and signature == calculated_signature:\n return models.ApiUser(query.api_key, query.org, query.groups.keys(), name=\"ApiKey: Query {}\".format(query.id))\n\n return None\n\n\ndef get_user_from_api_key(api_key, query_id):\n if not api_key:\n return None\n\n user = None\n\n # TODO: once we switch all api key storage into the ApiKey model, this code will be much simplified\n org = current_org._get_current_object()\n try:\n user = models.User.get_by_api_key_and_org(api_key, org)\n if user.is_disabled:\n user = None\n except models.NoResultFound:\n try:\n api_key = models.ApiKey.get_by_api_key(api_key)\n user = models.ApiUser(api_key, api_key.org, [])\n except models.NoResultFound:\n if query_id:\n query = models.Query.get_by_id_and_org(query_id, org)\n if query and query.api_key == api_key:\n user = models.ApiUser(api_key, query.org, query.groups.keys(), name=\"ApiKey: Query {}\".format(query.id))\n\n return user\n\n\ndef get_api_key_from_request(request):\n api_key = request.args.get('api_key', None)\n\n if api_key is not None:\n return api_key\n\n if request.headers.get('Authorization'):\n auth_header = request.headers.get('Authorization')\n api_key = auth_header.replace('Key ', '', 1)\n elif request.view_args is not None and request.view_args.get('token'):\n api_key = request.view_args['token']\n\n return api_key\n\n\ndef api_key_load_user_from_request(request):\n api_key = get_api_key_from_request(request)\n if request.view_args is not None:\n query_id = request.view_args.get('query_id', None)\n user = get_user_from_api_key(api_key, query_id)\n else:\n user = None\n\n return user\n\n\ndef jwt_token_load_user_from_request(request):\n org = current_org._get_current_object()\n\n payload = None\n\n if org_settings['auth_jwt_auth_cookie_name']:\n jwt_token = request.cookies.get(org_settings['auth_jwt_auth_cookie_name'], None)\n elif org_settings['auth_jwt_auth_header_name']:\n jwt_token = request.headers.get(org_settings['auth_jwt_auth_header_name'], None)\n else:\n return None\n\n if jwt_token:\n payload, token_is_valid = jwt_auth.verify_jwt_token(\n jwt_token,\n expected_issuer=org_settings['auth_jwt_auth_issuer'],\n expected_audience=org_settings['auth_jwt_auth_audience'],\n algorithms=org_settings['auth_jwt_auth_algorithms'],\n public_certs_url=org_settings['auth_jwt_auth_public_certs_url'],\n )\n if not token_is_valid:\n raise Unauthorized('Invalid JWT token')\n\n if not payload:\n return\n\n try:\n user = models.User.get_by_email_and_org(payload['email'], org)\n except models.NoResultFound:\n user = create_and_login_user(current_org, payload['email'], 
payload['email'])\n\n return user\n\n\ndef log_user_logged_in(app, user):\n event = {\n 'org_id': user.org_id,\n 'user_id': user.id,\n 'action': 'login',\n 'object_type': 'redash',\n 'timestamp': int(time.time()),\n 'user_agent': request.user_agent.string,\n 'ip': request.remote_addr\n }\n\n record_event.delay(event)\n\n\n@login_manager.unauthorized_handler\ndef redirect_to_login():\n if request.is_xhr or '/api/' in request.path:\n response = jsonify({'message': \"Couldn't find resource. Please login and try again.\"})\n response.status_code = 404\n return response\n\n login_url = get_login_url(next=request.url, external=False)\n\n return redirect(login_url)\n\n\ndef logout_and_redirect_to_index():\n logout_user()\n\n if settings.MULTI_ORG and current_org == None:\n index_url = '/'\n elif settings.MULTI_ORG:\n index_url = url_for('redash.index', org_slug=current_org.slug, _external=False)\n else:\n index_url = url_for('redash.index', _external=False)\n\n return redirect(index_url)\n\n\ndef init_app(app):\n from redash.authentication import google_oauth, saml_auth, remote_user_auth, ldap_auth\n\n login_manager.init_app(app)\n login_manager.anonymous_user = models.AnonymousUser\n\n app.register_blueprint(google_oauth.blueprint)\n app.register_blueprint(saml_auth.blueprint)\n app.register_blueprint(remote_user_auth.blueprint)\n app.register_blueprint(ldap_auth.blueprint)\n\n user_logged_in.connect(log_user_logged_in)\n login_manager.request_loader(request_loader)\n\n\ndef create_and_login_user(org, name, email, picture=None):\n try:\n user_object = models.User.get_by_email_and_org(email, org)\n if user_object.is_disabled:\n return None\n if user_object.is_invitation_pending:\n user_object.is_invitation_pending = False\n models.db.session.commit()\n if user_object.name != name:\n logger.debug(\"Updating user name (%r -> %r)\", user_object.name, name)\n user_object.name = name\n models.db.session.commit()\n except NoResultFound:\n logger.debug(\"Creating user object (%r)\", name)\n user_object = models.User(org=org, name=name, email=email, is_invitation_pending=False,\n _profile_image_url=picture, group_ids=[org.default_group.id])\n models.db.session.add(user_object)\n models.db.session.commit()\n\n login_user(user_object, remember=True)\n\n return user_object\n\n\ndef get_next_path(unsafe_next_path):\n if not unsafe_next_path:\n return ''\n\n # Preventing open redirection attacks\n parts = list(urlsplit(unsafe_next_path))\n parts[0] = '' # clear scheme\n parts[1] = '' # clear netloc\n safe_next_path = urlunsplit(parts)\n\n return safe_next_path\n", "path": "redash/authentication/__init__.py"}]}
3380
105
gh_patches_debug_29946
rasdani/github-patches
git_diff
nonebot__nonebot2-300
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: Mirai模块处理仅含@Bot的消息时报错 **描述问题:** 对于单独的@bot消息处理时报错。 **如何复现?** 使用mirai模块处理消息,当在群聊中单独@机器人(不跟随其他信息)时消息处理报错 **期望的结果** 能够正常处理 **环境信息:** - OS: CentOS & Winodws - Python Version: 3.9.2 - Nonebot Version: nonebot cli version 0.4.0 **截图** ![image](https://user-images.githubusercontent.com/44700327/112746873-7a2cbd80-8fe4-11eb-9b6a-46620c3ee67f.png) </issue> <code> [start of packages/nonebot-adapter-mirai/nonebot/adapters/mirai/utils.py] 1 import re 2 from functools import wraps 3 from typing import TYPE_CHECKING, Any, Callable, Coroutine, Optional, TypeVar 4 5 import httpx 6 from pydantic import Extra, ValidationError, validate_arguments 7 8 import nonebot.exception as exception 9 from nonebot.log import logger 10 from nonebot.message import handle_event 11 from nonebot.utils import escape_tag, logger_wrapper 12 13 from .event import Event, GroupMessage, MessageEvent, MessageSource 14 from .message import MessageType 15 16 if TYPE_CHECKING: 17 from .bot import Bot 18 19 _AsyncCallable = TypeVar("_AsyncCallable", bound=Callable[..., Coroutine]) 20 _AnyCallable = TypeVar("_AnyCallable", bound=Callable) 21 22 23 class Log: 24 25 @staticmethod 26 def log(level: str, message: str, exception: Optional[Exception] = None): 27 logger = logger_wrapper('MIRAI') 28 message = '<e>' + escape_tag(message) + '</e>' 29 logger(level=level.upper(), message=message, exception=exception) 30 31 @classmethod 32 def info(cls, message: Any): 33 cls.log('INFO', str(message)) 34 35 @classmethod 36 def debug(cls, message: Any): 37 cls.log('DEBUG', str(message)) 38 39 @classmethod 40 def warn(cls, message: Any): 41 cls.log('WARNING', str(message)) 42 43 @classmethod 44 def error(cls, message: Any, exception: Optional[Exception] = None): 45 cls.log('ERROR', str(message), exception=exception) 46 47 48 class ActionFailed(exception.ActionFailed): 49 """ 50 :说明: 51 52 API 请求成功返回数据,但 API 操作失败。 53 """ 54 55 def __init__(self, **kwargs): 56 super().__init__('mirai') 57 self.data = kwargs.copy() 58 59 def __repr__(self): 60 return self.__class__.__name__ + '(%s)' % ', '.join( 61 map(lambda m: '%s=%r' % m, self.data.items())) 62 63 64 class InvalidArgument(exception.AdapterException): 65 """ 66 :说明: 67 68 调用API的参数出错 69 """ 70 71 def __init__(self, **kwargs): 72 super().__init__('mirai') 73 74 75 def catch_network_error(function: _AsyncCallable) -> _AsyncCallable: 76 """ 77 :说明: 78 79 捕捉函数抛出的httpx网络异常并释放 ``NetworkError`` 异常 80 81 处理返回数据, 在code不为0时释放 ``ActionFailed`` 异常 82 83 \:\:\: warning 84 此装饰器只支持使用了httpx的异步函数 85 \:\:\: 86 """ 87 88 @wraps(function) 89 async def wrapper(*args, **kwargs): 90 try: 91 data = await function(*args, **kwargs) 92 except httpx.HTTPError: 93 raise exception.NetworkError('mirai') 94 logger.opt(colors=True).debug('<b>Mirai API returned data:</b> ' 95 f'<y>{escape_tag(str(data))}</y>') 96 if isinstance(data, dict): 97 if data.get('code', 0) != 0: 98 raise ActionFailed(**data) 99 return data 100 101 return wrapper # type: ignore 102 103 104 def argument_validation(function: _AnyCallable) -> _AnyCallable: 105 """ 106 :说明: 107 108 通过函数签名中的类型注解来对传入参数进行运行时校验 109 110 会在参数出错时释放 ``InvalidArgument`` 异常 111 """ 112 function = validate_arguments(config={ 113 'arbitrary_types_allowed': True, 114 'extra': Extra.forbid 115 })(function) 116 117 @wraps(function) 118 def wrapper(*args, **kwargs): 119 try: 120 return function(*args, **kwargs) 121 except ValidationError: 122 raise InvalidArgument 123 124 return wrapper # 
type: ignore 125 126 127 def process_source(bot: "Bot", event: MessageEvent) -> MessageEvent: 128 source = event.message_chain.extract_first(MessageType.SOURCE) 129 if source is not None: 130 event.source = MessageSource.parse_obj(source.data) 131 return event 132 133 134 def process_at(bot: "Bot", event: GroupMessage) -> GroupMessage: 135 at = event.message_chain.extract_first(MessageType.AT) 136 if at is not None: 137 if at.data['target'] == event.self_id: 138 event.to_me = True 139 else: 140 event.message_chain.insert(0, at) 141 return event 142 143 144 def process_nick(bot: "Bot", event: GroupMessage) -> GroupMessage: 145 plain = event.message_chain.extract_first(MessageType.PLAIN) 146 if plain is not None: 147 text = str(plain) 148 nick_regex = '|'.join(filter(lambda x: x, bot.config.nickname)) 149 matched = re.search(rf"^({nick_regex})([\s,,]*|$)", text, re.IGNORECASE) 150 if matched is not None: 151 event.to_me = True 152 nickname = matched.group(1) 153 Log.info(f'User is calling me {nickname}') 154 plain.data['text'] = text[matched.end():] 155 event.message_chain.insert(0, plain) 156 return event 157 158 159 def process_reply(bot: "Bot", event: GroupMessage) -> GroupMessage: 160 reply = event.message_chain.extract_first(MessageType.QUOTE) 161 if reply is not None: 162 if reply.data['senderId'] == event.self_id: 163 event.to_me = True 164 else: 165 event.message_chain.insert(0, reply) 166 return event 167 168 169 async def process_event(bot: "Bot", event: Event) -> None: 170 if isinstance(event, MessageEvent): 171 event.message_chain.reduce() 172 Log.debug(event.message_chain) 173 event = process_source(bot, event) 174 if isinstance(event, GroupMessage): 175 event = process_nick(bot, event) 176 event = process_at(bot, event) 177 event = process_reply(bot, event) 178 await handle_event(bot, event) [end of packages/nonebot-adapter-mirai/nonebot/adapters/mirai/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/utils.py b/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/utils.py --- a/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/utils.py +++ b/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/utils.py @@ -11,7 +11,7 @@ from nonebot.utils import escape_tag, logger_wrapper from .event import Event, GroupMessage, MessageEvent, MessageSource -from .message import MessageType +from .message import MessageType, MessageSegment if TYPE_CHECKING: from .bot import Bot @@ -64,7 +64,7 @@ class InvalidArgument(exception.AdapterException): """ :说明: - + 调用API的参数出错 """ @@ -106,7 +106,7 @@ :说明: 通过函数签名中的类型注解来对传入参数进行运行时校验 - + 会在参数出错时释放 ``InvalidArgument`` 异常 """ function = validate_arguments(config={ @@ -138,6 +138,8 @@ event.to_me = True else: event.message_chain.insert(0, at) + if not event.message_chain: + event.message_chain.append(MessageSegment.plain('')) return event @@ -175,4 +177,4 @@ event = process_nick(bot, event) event = process_at(bot, event) event = process_reply(bot, event) - await handle_event(bot, event) \ No newline at end of file + await handle_event(bot, event)
{"golden_diff": "diff --git a/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/utils.py b/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/utils.py\n--- a/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/utils.py\n+++ b/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/utils.py\n@@ -11,7 +11,7 @@\n from nonebot.utils import escape_tag, logger_wrapper\n \n from .event import Event, GroupMessage, MessageEvent, MessageSource\n-from .message import MessageType\n+from .message import MessageType, MessageSegment\n \n if TYPE_CHECKING:\n from .bot import Bot\n@@ -64,7 +64,7 @@\n class InvalidArgument(exception.AdapterException):\n \"\"\"\n :\u8bf4\u660e:\n- \n+\n \u8c03\u7528API\u7684\u53c2\u6570\u51fa\u9519\n \"\"\"\n \n@@ -106,7 +106,7 @@\n :\u8bf4\u660e:\n \n \u901a\u8fc7\u51fd\u6570\u7b7e\u540d\u4e2d\u7684\u7c7b\u578b\u6ce8\u89e3\u6765\u5bf9\u4f20\u5165\u53c2\u6570\u8fdb\u884c\u8fd0\u884c\u65f6\u6821\u9a8c\n- \n+\n \u4f1a\u5728\u53c2\u6570\u51fa\u9519\u65f6\u91ca\u653e ``InvalidArgument`` \u5f02\u5e38\n \"\"\"\n function = validate_arguments(config={\n@@ -138,6 +138,8 @@\n event.to_me = True\n else:\n event.message_chain.insert(0, at)\n+ if not event.message_chain:\n+ event.message_chain.append(MessageSegment.plain(''))\n return event\n \n \n@@ -175,4 +177,4 @@\n event = process_nick(bot, event)\n event = process_at(bot, event)\n event = process_reply(bot, event)\n- await handle_event(bot, event)\n\\ No newline at end of file\n+ await handle_event(bot, event)\n", "issue": "Bug: Mirai\u6a21\u5757\u5904\u7406\u4ec5\u542b@Bot\u7684\u6d88\u606f\u65f6\u62a5\u9519\n**\u63cf\u8ff0\u95ee\u9898\uff1a**\r\n\r\n\u5bf9\u4e8e\u5355\u72ec\u7684@bot\u6d88\u606f\u5904\u7406\u65f6\u62a5\u9519\u3002\r\n\r\n**\u5982\u4f55\u590d\u73b0\uff1f**\r\n\r\n\u4f7f\u7528mirai\u6a21\u5757\u5904\u7406\u6d88\u606f\uff0c\u5f53\u5728\u7fa4\u804a\u4e2d\u5355\u72ec@\u673a\u5668\u4eba\uff08\u4e0d\u8ddf\u968f\u5176\u4ed6\u4fe1\u606f\uff09\u65f6\u6d88\u606f\u5904\u7406\u62a5\u9519\r\n\r\n**\u671f\u671b\u7684\u7ed3\u679c**\r\n\r\n\u80fd\u591f\u6b63\u5e38\u5904\u7406\r\n\r\n**\u73af\u5883\u4fe1\u606f\uff1a**\r\n\r\n - OS: CentOS & Winodws\r\n - Python Version: 3.9.2\r\n - Nonebot Version: nonebot cli version 0.4.0\r\n\r\n**\u622a\u56fe**\r\n![image](https://user-images.githubusercontent.com/44700327/112746873-7a2cbd80-8fe4-11eb-9b6a-46620c3ee67f.png)\r\n\r\n\n", "before_files": [{"content": "import re\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, Any, Callable, Coroutine, Optional, TypeVar\n\nimport httpx\nfrom pydantic import Extra, ValidationError, validate_arguments\n\nimport nonebot.exception as exception\nfrom nonebot.log import logger\nfrom nonebot.message import handle_event\nfrom nonebot.utils import escape_tag, logger_wrapper\n\nfrom .event import Event, GroupMessage, MessageEvent, MessageSource\nfrom .message import MessageType\n\nif TYPE_CHECKING:\n from .bot import Bot\n\n_AsyncCallable = TypeVar(\"_AsyncCallable\", bound=Callable[..., Coroutine])\n_AnyCallable = TypeVar(\"_AnyCallable\", bound=Callable)\n\n\nclass Log:\n\n @staticmethod\n def log(level: str, message: str, exception: Optional[Exception] = None):\n logger = logger_wrapper('MIRAI')\n message = '<e>' + escape_tag(message) + '</e>'\n logger(level=level.upper(), message=message, exception=exception)\n\n @classmethod\n def info(cls, message: Any):\n cls.log('INFO', str(message))\n\n @classmethod\n def debug(cls, message: Any):\n cls.log('DEBUG', str(message))\n\n @classmethod\n def warn(cls, message: Any):\n 
cls.log('WARNING', str(message))\n\n @classmethod\n def error(cls, message: Any, exception: Optional[Exception] = None):\n cls.log('ERROR', str(message), exception=exception)\n\n\nclass ActionFailed(exception.ActionFailed):\n \"\"\"\n :\u8bf4\u660e:\n\n API \u8bf7\u6c42\u6210\u529f\u8fd4\u56de\u6570\u636e\uff0c\u4f46 API \u64cd\u4f5c\u5931\u8d25\u3002\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__('mirai')\n self.data = kwargs.copy()\n\n def __repr__(self):\n return self.__class__.__name__ + '(%s)' % ', '.join(\n map(lambda m: '%s=%r' % m, self.data.items()))\n\n\nclass InvalidArgument(exception.AdapterException):\n \"\"\"\n :\u8bf4\u660e:\n \n \u8c03\u7528API\u7684\u53c2\u6570\u51fa\u9519\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__('mirai')\n\n\ndef catch_network_error(function: _AsyncCallable) -> _AsyncCallable:\n \"\"\"\n :\u8bf4\u660e:\n\n \u6355\u6349\u51fd\u6570\u629b\u51fa\u7684httpx\u7f51\u7edc\u5f02\u5e38\u5e76\u91ca\u653e ``NetworkError`` \u5f02\u5e38\n\n \u5904\u7406\u8fd4\u56de\u6570\u636e, \u5728code\u4e0d\u4e3a0\u65f6\u91ca\u653e ``ActionFailed`` \u5f02\u5e38\n\n \\:\\:\\: warning\n \u6b64\u88c5\u9970\u5668\u53ea\u652f\u6301\u4f7f\u7528\u4e86httpx\u7684\u5f02\u6b65\u51fd\u6570\n \\:\\:\\:\n \"\"\"\n\n @wraps(function)\n async def wrapper(*args, **kwargs):\n try:\n data = await function(*args, **kwargs)\n except httpx.HTTPError:\n raise exception.NetworkError('mirai')\n logger.opt(colors=True).debug('<b>Mirai API returned data:</b> '\n f'<y>{escape_tag(str(data))}</y>')\n if isinstance(data, dict):\n if data.get('code', 0) != 0:\n raise ActionFailed(**data)\n return data\n\n return wrapper # type: ignore\n\n\ndef argument_validation(function: _AnyCallable) -> _AnyCallable:\n \"\"\"\n :\u8bf4\u660e:\n\n \u901a\u8fc7\u51fd\u6570\u7b7e\u540d\u4e2d\u7684\u7c7b\u578b\u6ce8\u89e3\u6765\u5bf9\u4f20\u5165\u53c2\u6570\u8fdb\u884c\u8fd0\u884c\u65f6\u6821\u9a8c\n \n \u4f1a\u5728\u53c2\u6570\u51fa\u9519\u65f6\u91ca\u653e ``InvalidArgument`` \u5f02\u5e38\n \"\"\"\n function = validate_arguments(config={\n 'arbitrary_types_allowed': True,\n 'extra': Extra.forbid\n })(function)\n\n @wraps(function)\n def wrapper(*args, **kwargs):\n try:\n return function(*args, **kwargs)\n except ValidationError:\n raise InvalidArgument\n\n return wrapper # type: ignore\n\n\ndef process_source(bot: \"Bot\", event: MessageEvent) -> MessageEvent:\n source = event.message_chain.extract_first(MessageType.SOURCE)\n if source is not None:\n event.source = MessageSource.parse_obj(source.data)\n return event\n\n\ndef process_at(bot: \"Bot\", event: GroupMessage) -> GroupMessage:\n at = event.message_chain.extract_first(MessageType.AT)\n if at is not None:\n if at.data['target'] == event.self_id:\n event.to_me = True\n else:\n event.message_chain.insert(0, at)\n return event\n\n\ndef process_nick(bot: \"Bot\", event: GroupMessage) -> GroupMessage:\n plain = event.message_chain.extract_first(MessageType.PLAIN)\n if plain is not None:\n text = str(plain)\n nick_regex = '|'.join(filter(lambda x: x, bot.config.nickname))\n matched = re.search(rf\"^({nick_regex})([\\s,\uff0c]*|$)\", text, re.IGNORECASE)\n if matched is not None:\n event.to_me = True\n nickname = matched.group(1)\n Log.info(f'User is calling me {nickname}')\n plain.data['text'] = text[matched.end():]\n event.message_chain.insert(0, plain)\n return event\n\n\ndef process_reply(bot: \"Bot\", event: GroupMessage) -> GroupMessage:\n reply = event.message_chain.extract_first(MessageType.QUOTE)\n if reply is not None:\n if 
reply.data['senderId'] == event.self_id:\n event.to_me = True\n else:\n event.message_chain.insert(0, reply)\n return event\n\n\nasync def process_event(bot: \"Bot\", event: Event) -> None:\n if isinstance(event, MessageEvent):\n event.message_chain.reduce()\n Log.debug(event.message_chain)\n event = process_source(bot, event)\n if isinstance(event, GroupMessage):\n event = process_nick(bot, event)\n event = process_at(bot, event)\n event = process_reply(bot, event)\n await handle_event(bot, event)", "path": "packages/nonebot-adapter-mirai/nonebot/adapters/mirai/utils.py"}]}
2439
389
gh_patches_debug_7953
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-223
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Allow to modify the computed Span name via callback. # Description With Redis and Databases in a traces, there is no easy way from the span name to see if a "SELECT" is going to Redis or the database. ## Type of change Please delete options that are not relevant. - [X] New feature (non-breaking change which adds functionality) # How Has This Been Tested? - [X] TestRedis::test_name_callback # Does This PR Require a Core Repo Change? - [ ] Yes. - Link to PR: - [X] No. # Checklist: See [contributing.md](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/CONTRIBUTING.md) for styleguide, changelog guidelines, and more. - [?] Followed the style guidelines of this project - [X] Changelogs have been updated - [X] Unit tests have been added - [X] Documentation has been updated </issue> <code> [start of instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 This library allows tracing HTTP requests made by the 17 `requests <https://requests.readthedocs.io/en/master/>`_ library. 18 19 Usage 20 ----- 21 22 .. 
code-block:: python 23 24 import requests 25 import opentelemetry.instrumentation.requests 26 27 # You can optionally pass a custom TracerProvider to 28 # RequestInstrumentor.instrument() 29 opentelemetry.instrumentation.requests.RequestsInstrumentor().instrument() 30 response = requests.get(url="https://www.example.org/") 31 32 API 33 --- 34 """ 35 36 import functools 37 import types 38 39 from requests import Timeout, URLRequired 40 from requests.exceptions import InvalidSchema, InvalidURL, MissingSchema 41 from requests.sessions import Session 42 from requests.structures import CaseInsensitiveDict 43 44 from opentelemetry import context, propagators 45 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor 46 from opentelemetry.instrumentation.metric import ( 47 HTTPMetricRecorder, 48 HTTPMetricType, 49 MetricMixin, 50 ) 51 from opentelemetry.instrumentation.requests.version import __version__ 52 from opentelemetry.instrumentation.utils import http_status_to_status_code 53 from opentelemetry.trace import SpanKind, get_tracer 54 from opentelemetry.trace.status import Status, StatusCode 55 56 # A key to a context variable to avoid creating duplicate spans when instrumenting 57 # both, Session.request and Session.send, since Session.request calls into Session.send 58 _SUPPRESS_REQUESTS_INSTRUMENTATION_KEY = "suppress_requests_instrumentation" 59 60 61 # pylint: disable=unused-argument 62 # pylint: disable=R0915 63 def _instrument(tracer_provider=None, span_callback=None, name_callback=None): 64 """Enables tracing of all requests calls that go through 65 :code:`requests.session.Session.request` (this includes 66 :code:`requests.get`, etc.).""" 67 68 # Since 69 # https://github.com/psf/requests/commit/d72d1162142d1bf8b1b5711c664fbbd674f349d1 70 # (v0.7.0, Oct 23, 2011), get, post, etc are implemented via request which 71 # again, is implemented via Session.request (`Session` was named `session` 72 # before v1.0.0, Dec 17, 2012, see 73 # https://github.com/psf/requests/commit/4e5c4a6ab7bb0195dececdd19bb8505b872fe120) 74 75 wrapped_request = Session.request 76 wrapped_send = Session.send 77 78 @functools.wraps(wrapped_request) 79 def instrumented_request(self, method, url, *args, **kwargs): 80 def get_or_create_headers(): 81 headers = kwargs.get("headers") 82 if headers is None: 83 headers = {} 84 kwargs["headers"] = headers 85 86 return headers 87 88 def call_wrapped(): 89 return wrapped_request(self, method, url, *args, **kwargs) 90 91 return _instrumented_requests_call( 92 method, url, call_wrapped, get_or_create_headers 93 ) 94 95 @functools.wraps(wrapped_send) 96 def instrumented_send(self, request, **kwargs): 97 def get_or_create_headers(): 98 request.headers = ( 99 request.headers 100 if request.headers is not None 101 else CaseInsensitiveDict() 102 ) 103 return request.headers 104 105 def call_wrapped(): 106 return wrapped_send(self, request, **kwargs) 107 108 return _instrumented_requests_call( 109 request.method, request.url, call_wrapped, get_or_create_headers 110 ) 111 112 def _instrumented_requests_call( 113 method: str, url: str, call_wrapped, get_or_create_headers 114 ): 115 if context.get_value("suppress_instrumentation") or context.get_value( 116 _SUPPRESS_REQUESTS_INSTRUMENTATION_KEY 117 ): 118 return call_wrapped() 119 120 # See 121 # https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/http.md#http-client 122 method = method.upper() 123 span_name = "" 124 if name_callback is not None: 125 
span_name = name_callback() 126 if not span_name or not isinstance(span_name, str): 127 span_name = get_default_span_name(method) 128 129 recorder = RequestsInstrumentor().metric_recorder 130 131 labels = {} 132 labels["http.method"] = method 133 labels["http.url"] = url 134 135 with get_tracer( 136 __name__, __version__, tracer_provider 137 ).start_as_current_span(span_name, kind=SpanKind.CLIENT) as span: 138 exception = None 139 with recorder.record_client_duration(labels): 140 if span.is_recording(): 141 span.set_attribute("component", "http") 142 span.set_attribute("http.method", method) 143 span.set_attribute("http.url", url) 144 145 headers = get_or_create_headers() 146 propagators.inject(type(headers).__setitem__, headers) 147 148 token = context.attach( 149 context.set_value( 150 _SUPPRESS_REQUESTS_INSTRUMENTATION_KEY, True 151 ) 152 ) 153 try: 154 result = call_wrapped() # *** PROCEED 155 except Exception as exc: # pylint: disable=W0703 156 exception = exc 157 result = getattr(exc, "response", None) 158 finally: 159 context.detach(token) 160 161 if result is not None: 162 if span.is_recording(): 163 span.set_attribute( 164 "http.status_code", result.status_code 165 ) 166 span.set_attribute("http.status_text", result.reason) 167 span.set_status( 168 Status( 169 http_status_to_status_code(result.status_code) 170 ) 171 ) 172 labels["http.status_code"] = str(result.status_code) 173 if result.raw and result.raw.version: 174 labels["http.flavor"] = ( 175 str(result.raw.version)[:1] 176 + "." 177 + str(result.raw.version)[:-1] 178 ) 179 if span_callback is not None: 180 span_callback(span, result) 181 182 if exception is not None: 183 raise exception.with_traceback(exception.__traceback__) 184 185 return result 186 187 instrumented_request.opentelemetry_instrumentation_requests_applied = True 188 Session.request = instrumented_request 189 190 instrumented_send.opentelemetry_instrumentation_requests_applied = True 191 Session.send = instrumented_send 192 193 194 def _uninstrument(): 195 """Disables instrumentation of :code:`requests` through this module. 196 197 Note that this only works if no other module also patches requests.""" 198 _uninstrument_from(Session) 199 200 201 def _uninstrument_from(instr_root, restore_as_bound_func=False): 202 for instr_func_name in ("request", "send"): 203 instr_func = getattr(instr_root, instr_func_name) 204 if not getattr( 205 instr_func, 206 "opentelemetry_instrumentation_requests_applied", 207 False, 208 ): 209 continue 210 211 original = instr_func.__wrapped__ # pylint:disable=no-member 212 if restore_as_bound_func: 213 original = types.MethodType(original, instr_root) 214 setattr(instr_root, instr_func_name, original) 215 216 217 def get_default_span_name(method): 218 """Default implementation for name_callback, returns HTTP {method_name}.""" 219 return "HTTP {}".format(method).strip() 220 221 222 class RequestsInstrumentor(BaseInstrumentor, MetricMixin): 223 """An instrumentor for requests 224 See `BaseInstrumentor` 225 """ 226 227 def _instrument(self, **kwargs): 228 """Instruments requests module 229 230 Args: 231 **kwargs: Optional arguments 232 ``tracer_provider``: a TracerProvider, defaults to global 233 ``span_callback``: An optional callback invoked before returning the http response. Invoked with Span and requests.Response 234 ``name_callback``: Callback which calculates a generic span name for an 235 outgoing HTTP request based on the method and url. 236 Optional: Defaults to get_default_span_name. 
237 """ 238 _instrument( 239 tracer_provider=kwargs.get("tracer_provider"), 240 span_callback=kwargs.get("span_callback"), 241 name_callback=kwargs.get("name_callback"), 242 ) 243 self.init_metrics( 244 __name__, __version__, 245 ) 246 # pylint: disable=W0201 247 self.metric_recorder = HTTPMetricRecorder( 248 self.meter, HTTPMetricType.CLIENT 249 ) 250 251 def _uninstrument(self, **kwargs): 252 _uninstrument() 253 254 @staticmethod 255 def uninstrument_session(session): 256 """Disables instrumentation on the session object.""" 257 _uninstrument_from(session, restore_as_bound_func=True) 258 [end of instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py b/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py --- a/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py @@ -25,7 +25,7 @@ import opentelemetry.instrumentation.requests # You can optionally pass a custom TracerProvider to - # RequestInstrumentor.instrument() + # RequestsInstrumentor.instrument() opentelemetry.instrumentation.requests.RequestsInstrumentor().instrument() response = requests.get(url="https://www.example.org/")
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py b/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py\n@@ -25,7 +25,7 @@\n import opentelemetry.instrumentation.requests\n \n # You can optionally pass a custom TracerProvider to\n- # RequestInstrumentor.instrument()\n+ # RequestsInstrumentor.instrument()\n opentelemetry.instrumentation.requests.RequestsInstrumentor().instrument()\n response = requests.get(url=\"https://www.example.org/\")\n", "issue": "Allow to modify the computed Span name via callback.\n# Description\r\n\r\nWith Redis and Databases in a traces, there is no easy way from the span name to see if a \"SELECT\" is going to Redis or the database.\r\n\r\n\r\n## Type of change\r\n\r\nPlease delete options that are not relevant.\r\n\r\n- [X] New feature (non-breaking change which adds functionality)\r\n\r\n# How Has This Been Tested?\r\n\r\n- [X] TestRedis::test_name_callback\r\n\r\n# Does This PR Require a Core Repo Change?\r\n\r\n- [ ] Yes. - Link to PR: \r\n- [X] No.\r\n\r\n# Checklist:\r\n\r\nSee [contributing.md](https://github.com/open-telemetry/opentelemetry-python-contrib/blob/main/CONTRIBUTING.md) for styleguide, changelog guidelines, and more.\r\n\r\n- [?] Followed the style guidelines of this project\r\n- [X] Changelogs have been updated\r\n- [X] Unit tests have been added\r\n- [X] Documentation has been updated\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows tracing HTTP requests made by the\n`requests <https://requests.readthedocs.io/en/master/>`_ library.\n\nUsage\n-----\n\n.. 
code-block:: python\n\n import requests\n import opentelemetry.instrumentation.requests\n\n # You can optionally pass a custom TracerProvider to\n # RequestInstrumentor.instrument()\n opentelemetry.instrumentation.requests.RequestsInstrumentor().instrument()\n response = requests.get(url=\"https://www.example.org/\")\n\nAPI\n---\n\"\"\"\n\nimport functools\nimport types\n\nfrom requests import Timeout, URLRequired\nfrom requests.exceptions import InvalidSchema, InvalidURL, MissingSchema\nfrom requests.sessions import Session\nfrom requests.structures import CaseInsensitiveDict\n\nfrom opentelemetry import context, propagators\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.metric import (\n HTTPMetricRecorder,\n HTTPMetricType,\n MetricMixin,\n)\nfrom opentelemetry.instrumentation.requests.version import __version__\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.trace import SpanKind, get_tracer\nfrom opentelemetry.trace.status import Status, StatusCode\n\n# A key to a context variable to avoid creating duplicate spans when instrumenting\n# both, Session.request and Session.send, since Session.request calls into Session.send\n_SUPPRESS_REQUESTS_INSTRUMENTATION_KEY = \"suppress_requests_instrumentation\"\n\n\n# pylint: disable=unused-argument\n# pylint: disable=R0915\ndef _instrument(tracer_provider=None, span_callback=None, name_callback=None):\n \"\"\"Enables tracing of all requests calls that go through\n :code:`requests.session.Session.request` (this includes\n :code:`requests.get`, etc.).\"\"\"\n\n # Since\n # https://github.com/psf/requests/commit/d72d1162142d1bf8b1b5711c664fbbd674f349d1\n # (v0.7.0, Oct 23, 2011), get, post, etc are implemented via request which\n # again, is implemented via Session.request (`Session` was named `session`\n # before v1.0.0, Dec 17, 2012, see\n # https://github.com/psf/requests/commit/4e5c4a6ab7bb0195dececdd19bb8505b872fe120)\n\n wrapped_request = Session.request\n wrapped_send = Session.send\n\n @functools.wraps(wrapped_request)\n def instrumented_request(self, method, url, *args, **kwargs):\n def get_or_create_headers():\n headers = kwargs.get(\"headers\")\n if headers is None:\n headers = {}\n kwargs[\"headers\"] = headers\n\n return headers\n\n def call_wrapped():\n return wrapped_request(self, method, url, *args, **kwargs)\n\n return _instrumented_requests_call(\n method, url, call_wrapped, get_or_create_headers\n )\n\n @functools.wraps(wrapped_send)\n def instrumented_send(self, request, **kwargs):\n def get_or_create_headers():\n request.headers = (\n request.headers\n if request.headers is not None\n else CaseInsensitiveDict()\n )\n return request.headers\n\n def call_wrapped():\n return wrapped_send(self, request, **kwargs)\n\n return _instrumented_requests_call(\n request.method, request.url, call_wrapped, get_or_create_headers\n )\n\n def _instrumented_requests_call(\n method: str, url: str, call_wrapped, get_or_create_headers\n ):\n if context.get_value(\"suppress_instrumentation\") or context.get_value(\n _SUPPRESS_REQUESTS_INSTRUMENTATION_KEY\n ):\n return call_wrapped()\n\n # See\n # https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/http.md#http-client\n method = method.upper()\n span_name = \"\"\n if name_callback is not None:\n span_name = name_callback()\n if not span_name or not isinstance(span_name, str):\n span_name = get_default_span_name(method)\n\n recorder 
= RequestsInstrumentor().metric_recorder\n\n labels = {}\n labels[\"http.method\"] = method\n labels[\"http.url\"] = url\n\n with get_tracer(\n __name__, __version__, tracer_provider\n ).start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:\n exception = None\n with recorder.record_client_duration(labels):\n if span.is_recording():\n span.set_attribute(\"component\", \"http\")\n span.set_attribute(\"http.method\", method)\n span.set_attribute(\"http.url\", url)\n\n headers = get_or_create_headers()\n propagators.inject(type(headers).__setitem__, headers)\n\n token = context.attach(\n context.set_value(\n _SUPPRESS_REQUESTS_INSTRUMENTATION_KEY, True\n )\n )\n try:\n result = call_wrapped() # *** PROCEED\n except Exception as exc: # pylint: disable=W0703\n exception = exc\n result = getattr(exc, \"response\", None)\n finally:\n context.detach(token)\n\n if result is not None:\n if span.is_recording():\n span.set_attribute(\n \"http.status_code\", result.status_code\n )\n span.set_attribute(\"http.status_text\", result.reason)\n span.set_status(\n Status(\n http_status_to_status_code(result.status_code)\n )\n )\n labels[\"http.status_code\"] = str(result.status_code)\n if result.raw and result.raw.version:\n labels[\"http.flavor\"] = (\n str(result.raw.version)[:1]\n + \".\"\n + str(result.raw.version)[:-1]\n )\n if span_callback is not None:\n span_callback(span, result)\n\n if exception is not None:\n raise exception.with_traceback(exception.__traceback__)\n\n return result\n\n instrumented_request.opentelemetry_instrumentation_requests_applied = True\n Session.request = instrumented_request\n\n instrumented_send.opentelemetry_instrumentation_requests_applied = True\n Session.send = instrumented_send\n\n\ndef _uninstrument():\n \"\"\"Disables instrumentation of :code:`requests` through this module.\n\n Note that this only works if no other module also patches requests.\"\"\"\n _uninstrument_from(Session)\n\n\ndef _uninstrument_from(instr_root, restore_as_bound_func=False):\n for instr_func_name in (\"request\", \"send\"):\n instr_func = getattr(instr_root, instr_func_name)\n if not getattr(\n instr_func,\n \"opentelemetry_instrumentation_requests_applied\",\n False,\n ):\n continue\n\n original = instr_func.__wrapped__ # pylint:disable=no-member\n if restore_as_bound_func:\n original = types.MethodType(original, instr_root)\n setattr(instr_root, instr_func_name, original)\n\n\ndef get_default_span_name(method):\n \"\"\"Default implementation for name_callback, returns HTTP {method_name}.\"\"\"\n return \"HTTP {}\".format(method).strip()\n\n\nclass RequestsInstrumentor(BaseInstrumentor, MetricMixin):\n \"\"\"An instrumentor for requests\n See `BaseInstrumentor`\n \"\"\"\n\n def _instrument(self, **kwargs):\n \"\"\"Instruments requests module\n\n Args:\n **kwargs: Optional arguments\n ``tracer_provider``: a TracerProvider, defaults to global\n ``span_callback``: An optional callback invoked before returning the http response. 
Invoked with Span and requests.Response\n ``name_callback``: Callback which calculates a generic span name for an\n outgoing HTTP request based on the method and url.\n Optional: Defaults to get_default_span_name.\n \"\"\"\n _instrument(\n tracer_provider=kwargs.get(\"tracer_provider\"),\n span_callback=kwargs.get(\"span_callback\"),\n name_callback=kwargs.get(\"name_callback\"),\n )\n self.init_metrics(\n __name__, __version__,\n )\n # pylint: disable=W0201\n self.metric_recorder = HTTPMetricRecorder(\n self.meter, HTTPMetricType.CLIENT\n )\n\n def _uninstrument(self, **kwargs):\n _uninstrument()\n\n @staticmethod\n def uninstrument_session(session):\n \"\"\"Disables instrumentation on the session object.\"\"\"\n _uninstrument_from(session, restore_as_bound_func=True)\n", "path": "instrumentation/opentelemetry-instrumentation-requests/src/opentelemetry/instrumentation/requests/__init__.py"}]}
3,403
185
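A brief, hedged usage sketch for the `name_callback` hook discussed in the issue and snapshot above. In that snapshot the callback appears to be invoked with no arguments (`name_callback()`) and its result is used only when it is a non-empty string, so the zero-argument signature below is an assumption drawn from the snapshot rather than a documented contract.

```python
# Hedged usage sketch for the name_callback hook shown in the record above.
# Assumption from the snapshot: the callback takes no arguments and must return
# a non-empty str, otherwise the default "HTTP {METHOD}" span name is used.
import requests
from opentelemetry.instrumentation.requests import RequestsInstrumentor


def custom_span_name() -> str:
    # Distinguish these HTTP client spans from e.g. Redis/database spans in a trace.
    return "HTTP outbound (requests)"


RequestsInstrumentor().instrument(name_callback=custom_span_name)
response = requests.get(url="https://www.example.org/")
```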
gh_patches_debug_16131
rasdani/github-patches
git_diff
mabel-dev__opteryx-1593
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 🪲 random appears to be evaluated once was seeing what happens for opteryx for https://buttondown.email/jaffray/archive/the-case-of-a-curious-sql-query/ ~~~sql SELECT count(*) FROM GENERATE_SERIES(1000) AS one_thousand CROSS JOIN GENERATE_SERIES(1000) AS one_thousand_b WHERE random() < 0.5 ~~~ ~~~ AFTER COST OPTIMIZATION └─ EXIT └─ PROJECT (COUNT(*)) └─ AGGREGATE (COUNT(*)) └─ FILTER (False) └─ CROSS JOIN ├─ GENERATE SERIES (1000) AS one_thousand └─ GENERATE SERIES (1000) AS one_thousand_b ~~~ </issue> <code> [start of opteryx/components/cost_based_optimizer/strategies/constant_folding.py] 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import datetime 14 from typing import Any 15 16 import numpy 17 from orso.types import OrsoTypes 18 19 from opteryx.components.logical_planner import LogicalPlan 20 from opteryx.components.logical_planner import LogicalPlanNode 21 from opteryx.components.logical_planner import LogicalPlanStepType 22 from opteryx.managers.expression import NodeType 23 from opteryx.managers.expression import evaluate 24 from opteryx.managers.expression import get_all_nodes_of_type 25 from opteryx.models import Node 26 from opteryx.virtual_datasets import no_table_data 27 28 from .optimization_strategy import OptimizationStrategy 29 from .optimization_strategy import OptimizerContext 30 31 32 def build_literal_node(value: Any, root: Node): 33 # fmt:off 34 if hasattr(value, "as_py"): 35 value = value.as_py() 36 37 root.value = value 38 root.node_type = NodeType.LITERAL 39 if value is None: 40 root.type=OrsoTypes.NULL 41 elif isinstance(value, (bool, numpy.bool_)): 42 # boolean must be before numeric 43 root.type=OrsoTypes.BOOLEAN 44 elif isinstance(value, (str)): 45 root.type=OrsoTypes.VARCHAR 46 elif isinstance(value, (int, numpy.int64)): 47 root.type=OrsoTypes.INTEGER 48 elif isinstance(value, (numpy.datetime64, datetime.datetime)): 49 root.type=OrsoTypes.TIMESTAMP 50 elif isinstance(value, (datetime.date)): 51 root.type=OrsoTypes.DATE 52 else: 53 raise Exception("Unable to fold expression") 54 return root 55 # fmt:on 56 57 58 def fold_constants(root: Node) -> Node: 59 identifiers = get_all_nodes_of_type(root, (NodeType.IDENTIFIER, NodeType.WILDCARD)) 60 if len(identifiers) == 0: 61 table = no_table_data.read() 62 try: 63 result = evaluate(root, table, None)[0] 64 return build_literal_node(result, root) 65 except Exception as err: # nosec 66 # what ever the reason, just skip 67 # DEBUG:log (err) 68 pass 69 return root 70 71 72 class ConstantFoldingStrategy(OptimizationStrategy): 73 def visit(self, node: LogicalPlanNode, context: OptimizerContext) -> OptimizerContext: 74 """ 75 Constant Folding is when we precalculate expressions (or sub expressions) 76 which contain only constant or literal values. 
77 """ 78 if not context.optimized_plan: 79 context.optimized_plan = context.pre_optimized_tree.copy() # type: ignore 80 81 if node.node_type == LogicalPlanStepType.Filter: 82 node.condition = fold_constants(node.condition) 83 if node.condition.node_type == NodeType.LITERAL and node.condition.value: 84 context.optimized_plan.remove_node(context.node_id, heal=True) 85 else: 86 context.optimized_plan[context.node_id] = node 87 88 return context 89 90 def complete(self, plan: LogicalPlan, context: OptimizerContext) -> LogicalPlan: 91 # No finalization needed for this strategy 92 return plan 93 [end of opteryx/components/cost_based_optimizer/strategies/constant_folding.py] [start of opteryx/__version__.py] 1 __build__ = 430 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 Store the version here so: 17 1) we don't load dependencies by storing it in __init__.py 18 2) we can import it in setup.py for the same reason 19 """ 20 from enum import Enum # isort: skip 21 22 23 class VersionStatus(Enum): 24 ALPHA = "alpha" 25 BETA = "beta" 26 RELEASE = "release" 27 28 29 _major = 0 30 _minor = 14 31 _revision = 2 32 _status = VersionStatus.ALPHA 33 34 __author__ = "@joocer" 35 __version__ = f"{_major}.{_minor}.{_revision}" + ( 36 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else "" 37 ) 38 [end of opteryx/__version__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opteryx/__version__.py b/opteryx/__version__.py --- a/opteryx/__version__.py +++ b/opteryx/__version__.py @@ -1,4 +1,4 @@ -__build__ = 430 +__build__ = 432 # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/opteryx/components/cost_based_optimizer/strategies/constant_folding.py b/opteryx/components/cost_based_optimizer/strategies/constant_folding.py --- a/opteryx/components/cost_based_optimizer/strategies/constant_folding.py +++ b/opteryx/components/cost_based_optimizer/strategies/constant_folding.py @@ -57,6 +57,11 @@ def fold_constants(root: Node) -> Node: identifiers = get_all_nodes_of_type(root, (NodeType.IDENTIFIER, NodeType.WILDCARD)) + functions = get_all_nodes_of_type(root, (NodeType.FUNCTION,)) + + if any(func.value in {"RANDOM", "RAND", "NORMAL", "RANDOM_STRING"} for func in functions): + return root + if len(identifiers) == 0: table = no_table_data.read() try:
{"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 430\n+__build__ = 432\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\ndiff --git a/opteryx/components/cost_based_optimizer/strategies/constant_folding.py b/opteryx/components/cost_based_optimizer/strategies/constant_folding.py\n--- a/opteryx/components/cost_based_optimizer/strategies/constant_folding.py\n+++ b/opteryx/components/cost_based_optimizer/strategies/constant_folding.py\n@@ -57,6 +57,11 @@\n \n def fold_constants(root: Node) -> Node:\n identifiers = get_all_nodes_of_type(root, (NodeType.IDENTIFIER, NodeType.WILDCARD))\n+ functions = get_all_nodes_of_type(root, (NodeType.FUNCTION,))\n+\n+ if any(func.value in {\"RANDOM\", \"RAND\", \"NORMAL\", \"RANDOM_STRING\"} for func in functions):\n+ return root\n+\n if len(identifiers) == 0:\n table = no_table_data.read()\n try:\n", "issue": "\ud83e\udeb2 random appears to be evaluated once\nwas seeing what happens for opteryx for https://buttondown.email/jaffray/archive/the-case-of-a-curious-sql-query/\r\n\r\n~~~sql\r\nSELECT count(*) \r\nFROM GENERATE_SERIES(1000) AS one_thousand \r\nCROSS JOIN GENERATE_SERIES(1000) AS one_thousand_b \r\nWHERE random() < 0.5\r\n~~~\r\n\r\n~~~\r\nAFTER COST OPTIMIZATION\r\n\u2514\u2500 EXIT\r\n \u2514\u2500 PROJECT (COUNT(*))\r\n \u2514\u2500 AGGREGATE (COUNT(*))\r\n \u2514\u2500 FILTER (False)\r\n \u2514\u2500 CROSS JOIN\r\n \u251c\u2500 GENERATE SERIES (1000) AS one_thousand\r\n \u2514\u2500 GENERATE SERIES (1000) AS one_thousand_b\r\n~~~\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport datetime\nfrom typing import Any\n\nimport numpy\nfrom orso.types import OrsoTypes\n\nfrom opteryx.components.logical_planner import LogicalPlan\nfrom opteryx.components.logical_planner import LogicalPlanNode\nfrom opteryx.components.logical_planner import LogicalPlanStepType\nfrom opteryx.managers.expression import NodeType\nfrom opteryx.managers.expression import evaluate\nfrom opteryx.managers.expression import get_all_nodes_of_type\nfrom opteryx.models import Node\nfrom opteryx.virtual_datasets import no_table_data\n\nfrom .optimization_strategy import OptimizationStrategy\nfrom .optimization_strategy import OptimizerContext\n\n\ndef build_literal_node(value: Any, root: Node):\n # fmt:off\n if hasattr(value, \"as_py\"):\n value = value.as_py()\n\n root.value = value\n root.node_type = NodeType.LITERAL\n if value is None:\n root.type=OrsoTypes.NULL\n elif isinstance(value, (bool, numpy.bool_)):\n # boolean must be before numeric\n root.type=OrsoTypes.BOOLEAN\n elif isinstance(value, (str)):\n root.type=OrsoTypes.VARCHAR\n elif isinstance(value, (int, numpy.int64)):\n root.type=OrsoTypes.INTEGER\n elif isinstance(value, (numpy.datetime64, datetime.datetime)):\n root.type=OrsoTypes.TIMESTAMP\n elif isinstance(value, 
(datetime.date)):\n root.type=OrsoTypes.DATE\n else:\n raise Exception(\"Unable to fold expression\")\n return root\n # fmt:on\n\n\ndef fold_constants(root: Node) -> Node:\n identifiers = get_all_nodes_of_type(root, (NodeType.IDENTIFIER, NodeType.WILDCARD))\n if len(identifiers) == 0:\n table = no_table_data.read()\n try:\n result = evaluate(root, table, None)[0]\n return build_literal_node(result, root)\n except Exception as err: # nosec\n # what ever the reason, just skip\n # DEBUG:log (err)\n pass\n return root\n\n\nclass ConstantFoldingStrategy(OptimizationStrategy):\n def visit(self, node: LogicalPlanNode, context: OptimizerContext) -> OptimizerContext:\n \"\"\"\n Constant Folding is when we precalculate expressions (or sub expressions)\n which contain only constant or literal values.\n \"\"\"\n if not context.optimized_plan:\n context.optimized_plan = context.pre_optimized_tree.copy() # type: ignore\n\n if node.node_type == LogicalPlanStepType.Filter:\n node.condition = fold_constants(node.condition)\n if node.condition.node_type == NodeType.LITERAL and node.condition.value:\n context.optimized_plan.remove_node(context.node_id, heal=True)\n else:\n context.optimized_plan[context.node_id] = node\n\n return context\n\n def complete(self, plan: LogicalPlan, context: OptimizerContext) -> LogicalPlan:\n # No finalization needed for this strategy\n return plan\n", "path": "opteryx/components/cost_based_optimizer/strategies/constant_folding.py"}, {"content": "__build__ = 430\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 14\n_revision = 2\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]}
2,049
292
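The patch above stops the optimizer from constant-folding expressions that call non-deterministic functions such as `random()`. The following self-contained sketch illustrates that guard using Python's `ast` module instead of Opteryx's own logical-plan nodes, so the function name and structure here are invented purely for the illustration.

```python
# Standalone sketch of the guard added above: never constant-fold an expression
# that calls a non-deterministic function such as random(), even if it contains
# no column references. Opteryx walks its own Node tree; ast is used here only
# to keep the example runnable.
import ast

NON_DETERMINISTIC = {"random", "rand", "normal", "random_string"}


def is_safe_to_fold(expression: str) -> bool:
    tree = ast.parse(expression, mode="eval")
    function_name_nodes = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
            if node.func.id.lower() in NON_DETERMINISTIC:
                return False          # must be re-evaluated per row, never folded
            function_name_nodes.add(id(node.func))
    for node in ast.walk(tree):
        if isinstance(node, ast.Name) and id(node) not in function_name_nodes:
            return False              # bare identifier, treated as a column reference
    return True


assert is_safe_to_fold("1 + 2 * 3")
assert not is_safe_to_fold("random() < 0.5")
assert not is_safe_to_fold("price * quantity")
```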
gh_patches_debug_20592
rasdani/github-patches
git_diff
litestar-org__litestar-1961
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Local run of `mypy` produces unexpected results ### Description Mypy produces unexpected failures locally, but the CI job passes: https://github.com/litestar-org/litestar/actions/runs/5561581892/jobs/10159348023?pr=1959 I think that this happens because `.pre-commit-config` and local mypy settings are not in sync. I will fix that. ### URL to code causing the issue _No response_ ### MCVE _No response_ ### Steps to reproduce ```bash 1. `poetry install --with lint -E full` 2. `poetry run mypy litestar` ``` ### Screenshots _No response_ ### Logs ```bash » mypy litestar litestar/contrib/jwt/jwt_token.py:115: error: Redundant cast to "str" [redundant-cast] litestar/_signature/models/attrs_signature_model.py:43: error: Skipping analyzing "pytimeparse.timeparse": module is installed, but missing library stubs or py.typed marker [import] litestar/_signature/models/attrs_signature_model.py:43: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports litestar/middleware/compression.py:29: error: Skipping analyzing "brotli": module is installed, but missing library stubs or py.typed marker [import] litestar/contrib/mako.py:19: error: Skipping analyzing "mako": module is installed, but missing library stubs or py.typed marker [import] litestar/contrib/mako.py:24: error: Skipping analyzing "mako.exceptions": module is installed, but missing library stubs or py.typed marker [import] litestar/contrib/mako.py:25: error: Skipping analyzing "mako.lookup": module is installed, but missing library stubs or py.typed marker [import] litestar/contrib/mako.py:28: error: Skipping analyzing "mako.template": module is installed, but missing library stubs or py.typed marker [import] litestar/cli/commands/schema.py:5: error: Skipping analyzing "jsbeautifier": module is installed, but missing library stubs or py.typed marker [import] Found 8 errors in 5 files (checked 303 source files) ``` ### Litestar Version `main` ### Platform - [ ] Linux - [X] Mac - [ ] Windows - [ ] Other (Please specify in the description above) <!-- POLAR PLEDGE BADGE START --> ## Funding * If you would like to see an issue prioritized, make a pledge towards it! * We receive the pledge once the issue is completed & verified <a href="https://polar.sh/litestar-org/litestar/issues/1960"> <picture> <source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/1960/pledge.svg?darkmode=1"> <img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/1960/pledge.svg"> </picture> </a> <!-- POLAR PLEDGE BADGE END --> </issue> <code> [start of litestar/contrib/jwt/jwt_token.py] 1 from __future__ import annotations 2 3 import dataclasses 4 from dataclasses import asdict, dataclass, field 5 from datetime import datetime, timezone 6 from typing import TYPE_CHECKING, Any, cast 7 8 from jose import JWSError, JWTError, jwt 9 10 from litestar.exceptions import ImproperlyConfiguredException, NotAuthorizedException 11 12 if TYPE_CHECKING: 13 from typing_extensions import Self 14 15 16 __all__ = ("Token",) 17 18 19 def _normalize_datetime(value: datetime) -> datetime: 20 """Convert the given value into UTC and strip microseconds. 
21 22 Args: 23 value: A datetime instance 24 25 Returns: 26 A datetime instance 27 """ 28 if value.tzinfo is not None: 29 value.astimezone(timezone.utc) 30 31 return value.replace(microsecond=0) 32 33 34 @dataclass 35 class Token: 36 """JWT Token DTO.""" 37 38 exp: datetime 39 """Expiration - datetime for token expiration.""" 40 sub: str 41 """Subject - usually a unique identifier of the user or equivalent entity.""" 42 iat: datetime = field(default_factory=lambda: _normalize_datetime(datetime.now(timezone.utc))) 43 """Issued at - should always be current now.""" 44 iss: str | None = field(default=None) 45 """Issuer - optional unique identifier for the issuer.""" 46 aud: str | None = field(default=None) 47 """Audience - intended audience.""" 48 jti: str | None = field(default=None) 49 """JWT ID - a unique identifier of the JWT between different issuers.""" 50 extras: dict[str, Any] = field(default_factory=dict) 51 """Extra fields that were found on the JWT token.""" 52 53 def __post_init__(self) -> None: 54 if len(self.sub) < 1: 55 raise ImproperlyConfiguredException("sub must be a string with a length greater than 0") 56 57 if isinstance(self.exp, datetime) and ( 58 (exp := _normalize_datetime(self.exp)) 59 and exp.timestamp() >= _normalize_datetime(datetime.now(timezone.utc)).timestamp() 60 ): 61 self.exp = exp 62 else: 63 raise ImproperlyConfiguredException("exp value must be a datetime in the future") 64 65 if isinstance(self.iat, datetime) and ( 66 (iat := _normalize_datetime(self.iat)) 67 and iat.timestamp() <= _normalize_datetime(datetime.now(timezone.utc)).timestamp() 68 ): 69 self.iat = iat 70 else: 71 raise ImproperlyConfiguredException("iat must be a current or past time") 72 73 @classmethod 74 def decode(cls, encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Self: 75 """Decode a passed in token string and returns a Token instance. 76 77 Args: 78 encoded_token: A base64 string containing an encoded JWT. 79 secret: The secret with which the JWT is encoded. It may optionally be an individual JWK or JWS set dict 80 algorithm: The algorithm used to encode the JWT. 81 82 Returns: 83 A decoded Token instance. 84 85 Raises: 86 NotAuthorizedException: If the token is invalid. 87 """ 88 try: 89 payload = jwt.decode(token=encoded_token, key=secret, algorithms=[algorithm], options={"verify_aud": False}) 90 exp = datetime.fromtimestamp(payload.pop("exp"), tz=timezone.utc) 91 iat = datetime.fromtimestamp(payload.pop("iat"), tz=timezone.utc) 92 field_names = {f.name for f in dataclasses.fields(Token)} 93 extra_fields = payload.keys() - field_names 94 extras = payload.pop("extras", {}) 95 for key in extra_fields: 96 extras[key] = payload.pop(key) 97 return cls(exp=exp, iat=iat, **payload, extras=extras) 98 except (KeyError, JWTError, ImproperlyConfiguredException) as e: 99 raise NotAuthorizedException("Invalid token") from e 100 101 def encode(self, secret: str, algorithm: str) -> str: 102 """Encode the token instance into a string. 103 104 Args: 105 secret: The secret with which the JWT is encoded. 106 algorithm: The algorithm used to encode the JWT. 107 108 Returns: 109 An encoded token string. 110 111 Raises: 112 ImproperlyConfiguredException: If encoding fails. 
113 """ 114 try: 115 return cast( 116 "str", 117 jwt.encode( 118 claims={k: v for k, v in asdict(self).items() if v is not None}, key=secret, algorithm=algorithm 119 ), 120 ) 121 except (JWTError, JWSError) as e: 122 raise ImproperlyConfiguredException("Failed to encode token") from e 123 [end of litestar/contrib/jwt/jwt_token.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/litestar/contrib/jwt/jwt_token.py b/litestar/contrib/jwt/jwt_token.py --- a/litestar/contrib/jwt/jwt_token.py +++ b/litestar/contrib/jwt/jwt_token.py @@ -3,7 +3,7 @@ import dataclasses from dataclasses import asdict, dataclass, field from datetime import datetime, timezone -from typing import TYPE_CHECKING, Any, cast +from typing import TYPE_CHECKING, Any from jose import JWSError, JWTError, jwt @@ -112,11 +112,8 @@ ImproperlyConfiguredException: If encoding fails. """ try: - return cast( - "str", - jwt.encode( - claims={k: v for k, v in asdict(self).items() if v is not None}, key=secret, algorithm=algorithm - ), + return jwt.encode( + claims={k: v for k, v in asdict(self).items() if v is not None}, key=secret, algorithm=algorithm ) except (JWTError, JWSError) as e: raise ImproperlyConfiguredException("Failed to encode token") from e
{"golden_diff": "diff --git a/litestar/contrib/jwt/jwt_token.py b/litestar/contrib/jwt/jwt_token.py\n--- a/litestar/contrib/jwt/jwt_token.py\n+++ b/litestar/contrib/jwt/jwt_token.py\n@@ -3,7 +3,7 @@\n import dataclasses\n from dataclasses import asdict, dataclass, field\n from datetime import datetime, timezone\n-from typing import TYPE_CHECKING, Any, cast\n+from typing import TYPE_CHECKING, Any\n \n from jose import JWSError, JWTError, jwt\n \n@@ -112,11 +112,8 @@\n ImproperlyConfiguredException: If encoding fails.\n \"\"\"\n try:\n- return cast(\n- \"str\",\n- jwt.encode(\n- claims={k: v for k, v in asdict(self).items() if v is not None}, key=secret, algorithm=algorithm\n- ),\n+ return jwt.encode(\n+ claims={k: v for k, v in asdict(self).items() if v is not None}, key=secret, algorithm=algorithm\n )\n except (JWTError, JWSError) as e:\n raise ImproperlyConfiguredException(\"Failed to encode token\") from e\n", "issue": "Local run of `mypy` produces unexpected results\n### Description\r\n\r\nMypy produces unexpected failures locally, but the CI job passes: https://github.com/litestar-org/litestar/actions/runs/5561581892/jobs/10159348023?pr=1959\r\n\r\nI think that this happens because `.pre-commit-config` and local mypy settings are not in sync.\r\nI will fix that.\r\n\r\n### URL to code causing the issue\r\n\r\n_No response_\r\n\r\n### MCVE\r\n\r\n_No response_\r\n\r\n### Steps to reproduce\r\n\r\n```bash\r\n1. `poetry install --with lint -E full`\r\n2. `poetry run mypy litestar`\r\n```\r\n\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Logs\r\n\r\n```bash\r\n\u00bb mypy litestar \r\nlitestar/contrib/jwt/jwt_token.py:115: error: Redundant cast to \"str\" [redundant-cast]\r\nlitestar/_signature/models/attrs_signature_model.py:43: error: Skipping analyzing \"pytimeparse.timeparse\": module is installed, but missing library stubs or py.typed marker [import]\r\nlitestar/_signature/models/attrs_signature_model.py:43: note: See https://mypy.readthedocs.io/en/stable/running_mypy.html#missing-imports\r\nlitestar/middleware/compression.py:29: error: Skipping analyzing \"brotli\": module is installed, but missing library stubs or py.typed marker [import]\r\nlitestar/contrib/mako.py:19: error: Skipping analyzing \"mako\": module is installed, but missing library stubs or py.typed marker [import]\r\nlitestar/contrib/mako.py:24: error: Skipping analyzing \"mako.exceptions\": module is installed, but missing library stubs or py.typed marker [import]\r\nlitestar/contrib/mako.py:25: error: Skipping analyzing \"mako.lookup\": module is installed, but missing library stubs or py.typed marker [import]\r\nlitestar/contrib/mako.py:28: error: Skipping analyzing \"mako.template\": module is installed, but missing library stubs or py.typed marker [import]\r\nlitestar/cli/commands/schema.py:5: error: Skipping analyzing \"jsbeautifier\": module is installed, but missing library stubs or py.typed marker [import]\r\nFound 8 errors in 5 files (checked 303 source files)\r\n```\r\n\r\n\r\n### Litestar Version\r\n\r\n`main`\r\n\r\n### Platform\r\n\r\n- [ ] Linux\r\n- [X] Mac\r\n- [ ] Windows\r\n- [ ] Other (Please specify in the description above)\r\n\r\n<!-- POLAR PLEDGE BADGE START -->\r\n## Funding\r\n* If you would like to see an issue prioritized, make a pledge towards it!\r\n* We receive the pledge once the issue is completed & verified\r\n\r\n<a href=\"https://polar.sh/litestar-org/litestar/issues/1960\">\r\n<picture>\r\n <source media=\"(prefers-color-scheme: dark)\" 
srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/1960/pledge.svg?darkmode=1\">\r\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/1960/pledge.svg\">\r\n</picture>\r\n</a>\r\n<!-- POLAR PLEDGE BADGE END -->\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport dataclasses\nfrom dataclasses import asdict, dataclass, field\nfrom datetime import datetime, timezone\nfrom typing import TYPE_CHECKING, Any, cast\n\nfrom jose import JWSError, JWTError, jwt\n\nfrom litestar.exceptions import ImproperlyConfiguredException, NotAuthorizedException\n\nif TYPE_CHECKING:\n from typing_extensions import Self\n\n\n__all__ = (\"Token\",)\n\n\ndef _normalize_datetime(value: datetime) -> datetime:\n \"\"\"Convert the given value into UTC and strip microseconds.\n\n Args:\n value: A datetime instance\n\n Returns:\n A datetime instance\n \"\"\"\n if value.tzinfo is not None:\n value.astimezone(timezone.utc)\n\n return value.replace(microsecond=0)\n\n\n@dataclass\nclass Token:\n \"\"\"JWT Token DTO.\"\"\"\n\n exp: datetime\n \"\"\"Expiration - datetime for token expiration.\"\"\"\n sub: str\n \"\"\"Subject - usually a unique identifier of the user or equivalent entity.\"\"\"\n iat: datetime = field(default_factory=lambda: _normalize_datetime(datetime.now(timezone.utc)))\n \"\"\"Issued at - should always be current now.\"\"\"\n iss: str | None = field(default=None)\n \"\"\"Issuer - optional unique identifier for the issuer.\"\"\"\n aud: str | None = field(default=None)\n \"\"\"Audience - intended audience.\"\"\"\n jti: str | None = field(default=None)\n \"\"\"JWT ID - a unique identifier of the JWT between different issuers.\"\"\"\n extras: dict[str, Any] = field(default_factory=dict)\n \"\"\"Extra fields that were found on the JWT token.\"\"\"\n\n def __post_init__(self) -> None:\n if len(self.sub) < 1:\n raise ImproperlyConfiguredException(\"sub must be a string with a length greater than 0\")\n\n if isinstance(self.exp, datetime) and (\n (exp := _normalize_datetime(self.exp))\n and exp.timestamp() >= _normalize_datetime(datetime.now(timezone.utc)).timestamp()\n ):\n self.exp = exp\n else:\n raise ImproperlyConfiguredException(\"exp value must be a datetime in the future\")\n\n if isinstance(self.iat, datetime) and (\n (iat := _normalize_datetime(self.iat))\n and iat.timestamp() <= _normalize_datetime(datetime.now(timezone.utc)).timestamp()\n ):\n self.iat = iat\n else:\n raise ImproperlyConfiguredException(\"iat must be a current or past time\")\n\n @classmethod\n def decode(cls, encoded_token: str, secret: str | dict[str, str], algorithm: str) -> Self:\n \"\"\"Decode a passed in token string and returns a Token instance.\n\n Args:\n encoded_token: A base64 string containing an encoded JWT.\n secret: The secret with which the JWT is encoded. 
It may optionally be an individual JWK or JWS set dict\n algorithm: The algorithm used to encode the JWT.\n\n Returns:\n A decoded Token instance.\n\n Raises:\n NotAuthorizedException: If the token is invalid.\n \"\"\"\n try:\n payload = jwt.decode(token=encoded_token, key=secret, algorithms=[algorithm], options={\"verify_aud\": False})\n exp = datetime.fromtimestamp(payload.pop(\"exp\"), tz=timezone.utc)\n iat = datetime.fromtimestamp(payload.pop(\"iat\"), tz=timezone.utc)\n field_names = {f.name for f in dataclasses.fields(Token)}\n extra_fields = payload.keys() - field_names\n extras = payload.pop(\"extras\", {})\n for key in extra_fields:\n extras[key] = payload.pop(key)\n return cls(exp=exp, iat=iat, **payload, extras=extras)\n except (KeyError, JWTError, ImproperlyConfiguredException) as e:\n raise NotAuthorizedException(\"Invalid token\") from e\n\n def encode(self, secret: str, algorithm: str) -> str:\n \"\"\"Encode the token instance into a string.\n\n Args:\n secret: The secret with which the JWT is encoded.\n algorithm: The algorithm used to encode the JWT.\n\n Returns:\n An encoded token string.\n\n Raises:\n ImproperlyConfiguredException: If encoding fails.\n \"\"\"\n try:\n return cast(\n \"str\",\n jwt.encode(\n claims={k: v for k, v in asdict(self).items() if v is not None}, key=secret, algorithm=algorithm\n ),\n )\n except (JWTError, JWSError) as e:\n raise ImproperlyConfiguredException(\"Failed to encode token\") from e\n", "path": "litestar/contrib/jwt/jwt_token.py"}]}
2,530
272
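The `[redundant-cast]` error in the record above comes from wrapping a call that the type checker already sees as returning `str`. A tiny hedged illustration of the same pattern, with a stand-in function instead of `jose.jwt.encode` (whose stubs are assumed, per the patch, to declare a `str` return type):

```python
# Minimal illustration of mypy's [redundant-cast] diagnostic fixed in the patch
# above. make_token() stands in for jwt.encode(...), which mypy already treats
# as returning str, so the cast adds nothing.
from typing import cast


def make_token() -> str:
    return "header.payload.signature"


def encode_with_redundant_cast() -> str:
    # mypy --warn-redundant-casts: error: Redundant cast to "str"
    return cast("str", make_token())


def encode_cleanly() -> str:
    return make_token()  # identical behaviour, no warning
```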
gh_patches_debug_9645
rasdani/github-patches
git_diff
wemake-services__wemake-python-styleguide-2343
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> WPS472 only detecting one pure variable assignment ### What's wrong WPS472 detects `name, *_ = get_address()`. When it comes to slightly more complex value assignments, it does not work: 1. `names[0], *_ = get_address()` 2. `self.name, *_ = get_address()` 3. `(name, street), *_ = get_address()` Also when extracting a second (or more values) the detection fails: 1. `name, street, *_ = get_address()` 2. `name, _, city, *_ = get_address()` 3. `name, _, self.city, *_ = get_address()` ### How it should be It would be nice if all of the examples would also be detected as WPS472 violations. ### Flake8 version and plugins { "dependencies": [], "platform": { "python_implementation": "CPython", "python_version": "3.9.6", "system": "Darwin" }, "plugins": [ { "is_local": false, "plugin": "flake8-bandit", "version": "2.1.2" }, { "is_local": false, "plugin": "flake8-broken-line", "version": "0.3.0" }, { "is_local": false, "plugin": "flake8-bugbear", "version": "20.11.1" }, { "is_local": false, "plugin": "flake8-comprehensions", "version": "3.4.0" }, { "is_local": false, "plugin": "flake8-darglint", "version": "1.8.0" }, { "is_local": false, "plugin": "flake8-debugger", "version": "4.0.0" }, { "is_local": false, "plugin": "flake8-docstrings", "version": "1.6.0, pydocstyle: 6.0.0" }, { "is_local": false, "plugin": "flake8-eradicate", "version": "1.0.0" }, { "is_local": false, "plugin": "flake8-string-format", "version": "0.3.0" }, { "is_local": false, "plugin": "flake8.datetimez", "version": "20.10.0" }, { "is_local": false, "plugin": "flake8_commas", "version": "2.0.0" }, { "is_local": false, "plugin": "flake8_isort", "version": "4.0.0" }, { "is_local": false, "plugin": "flake8_quotes", "version": "3.2.0" }, { "is_local": false, "plugin": "mccabe", "version": "0.6.1" }, { "is_local": false, "plugin": "naming", "version": "0.11.1" }, { "is_local": false, "plugin": "pycodestyle", "version": "2.7.0" }, { "is_local": false, "plugin": "pyflakes", "version": "2.3.0" }, { "is_local": false, "plugin": "rst-docstrings", "version": "0.2.3" }, { "is_local": false, "plugin": "wemake_python_styleguide", "version": "0.16.0" } ], "version": "3.9.2" } ### pip information pip 21.3.1 ### OS information macOS Monterey Version 12.0.1 </issue> <code> [start of wemake_python_styleguide/logic/tree/variables.py] 1 import ast 2 from typing import List, Union 3 4 from wemake_python_styleguide.logic import nodes 5 from wemake_python_styleguide.logic.naming import access 6 7 _VarDefinition = Union[ast.AST, ast.expr] 8 _LocalVariable = Union[ast.Name, ast.ExceptHandler] 9 10 11 def get_variable_name(node: _LocalVariable) -> str: 12 """Used to get variable names from all definitions.""" 13 if isinstance(node, ast.Name): 14 return node.id 15 return getattr(node, 'name', '') 16 17 18 def does_shadow_builtin(node: ast.AST) -> bool: 19 """ 20 We allow attributes and class-level builtin overrides. 21 22 Like: ``self.list = []`` or ``def map(self, function):`` 23 24 Why? 25 Because they cannot harm you since they do not shadow the real builtin. 
26 """ 27 return ( 28 not isinstance(node, ast.Attribute) and 29 not isinstance(nodes.get_context(node), ast.ClassDef) 30 ) 31 32 33 def is_valid_block_variable_definition(node: _VarDefinition) -> bool: 34 """Is used to check either block variables are correctly defined.""" 35 if isinstance(node, ast.Tuple): 36 return all( 37 is_valid_block_variable_definition(var_definition) 38 for var_definition in node.elts 39 ) 40 return _is_valid_single(node) 41 42 43 def is_valid_unpacking_target(target: ast.expr) -> bool: 44 """Checks if unpacking target is correct.""" 45 if isinstance(target, ast.Tuple): 46 return all( 47 _is_valid_single(element) 48 for element in target.elts 49 ) 50 return _is_valid_single(target) 51 52 53 def _is_valid_single(node: _VarDefinition) -> bool: 54 return ( 55 isinstance(node, ast.Name) or 56 isinstance(node, ast.Starred) and isinstance(node.value, ast.Name) 57 ) 58 59 60 def is_getting_element_by_unpacking(targets: List[ast.expr]) -> bool: 61 """Checks if unpacking targets used to get first or last element.""" 62 if len(targets) != 2: 63 return False 64 first_item = ( 65 isinstance(targets[0], ast.Name) and 66 isinstance(targets[1], ast.Starred) and 67 _is_unused_variable_name(targets[1].value) 68 ) 69 last_item = ( 70 isinstance(targets[1], ast.Name) and 71 isinstance(targets[0], ast.Starred) and 72 _is_unused_variable_name(targets[0].value) 73 ) 74 return first_item or last_item 75 76 77 def _is_unused_variable_name(node: ast.expr) -> bool: 78 return isinstance(node, ast.Name) and access.looks_like_unused(node.id) 79 [end of wemake_python_styleguide/logic/tree/variables.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wemake_python_styleguide/logic/tree/variables.py b/wemake_python_styleguide/logic/tree/variables.py --- a/wemake_python_styleguide/logic/tree/variables.py +++ b/wemake_python_styleguide/logic/tree/variables.py @@ -62,12 +62,10 @@ if len(targets) != 2: return False first_item = ( - isinstance(targets[0], ast.Name) and isinstance(targets[1], ast.Starred) and _is_unused_variable_name(targets[1].value) ) last_item = ( - isinstance(targets[1], ast.Name) and isinstance(targets[0], ast.Starred) and _is_unused_variable_name(targets[0].value) )
{"golden_diff": "diff --git a/wemake_python_styleguide/logic/tree/variables.py b/wemake_python_styleguide/logic/tree/variables.py\n--- a/wemake_python_styleguide/logic/tree/variables.py\n+++ b/wemake_python_styleguide/logic/tree/variables.py\n@@ -62,12 +62,10 @@\n if len(targets) != 2:\n return False\n first_item = (\n- isinstance(targets[0], ast.Name) and\n isinstance(targets[1], ast.Starred) and\n _is_unused_variable_name(targets[1].value)\n )\n last_item = (\n- isinstance(targets[1], ast.Name) and\n isinstance(targets[0], ast.Starred) and\n _is_unused_variable_name(targets[0].value)\n )\n", "issue": "WPS472 only detecting one pure variable assignment\n### What's wrong\r\n\r\nWPS472 detects `name, *_ = get_address()`. \r\nWhen it comes to slightly more complex value assignments, it does not work:\r\n1. `names[0], *_ = get_address()` \r\n2. `self.name, *_ = get_address()`\r\n3. `(name, street), *_ = get_address()`\r\n\r\nAlso when extracting a second (or more values) the detection fails:\r\n1. `name, street, *_ = get_address()`\r\n2. `name, _, city, *_ = get_address()`\r\n3. `name, _, self.city, *_ = get_address()`\r\n\r\n### How it should be\r\n\r\nIt would be nice if all of the examples would also be detected as WPS472 violations.\r\n\r\n### Flake8 version and plugins\r\n\r\n{\r\n \"dependencies\": [],\r\n \"platform\": {\r\n \"python_implementation\": \"CPython\",\r\n \"python_version\": \"3.9.6\",\r\n \"system\": \"Darwin\"\r\n },\r\n \"plugins\": [\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"flake8-bandit\",\r\n \"version\": \"2.1.2\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"flake8-broken-line\",\r\n \"version\": \"0.3.0\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"flake8-bugbear\",\r\n \"version\": \"20.11.1\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"flake8-comprehensions\",\r\n \"version\": \"3.4.0\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"flake8-darglint\",\r\n \"version\": \"1.8.0\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"flake8-debugger\",\r\n \"version\": \"4.0.0\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"flake8-docstrings\",\r\n \"version\": \"1.6.0, pydocstyle: 6.0.0\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"flake8-eradicate\",\r\n \"version\": \"1.0.0\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"flake8-string-format\",\r\n \"version\": \"0.3.0\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"flake8.datetimez\",\r\n \"version\": \"20.10.0\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"flake8_commas\",\r\n \"version\": \"2.0.0\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"flake8_isort\",\r\n \"version\": \"4.0.0\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"flake8_quotes\",\r\n \"version\": \"3.2.0\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"mccabe\",\r\n \"version\": \"0.6.1\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"naming\",\r\n \"version\": \"0.11.1\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"pycodestyle\",\r\n \"version\": \"2.7.0\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"pyflakes\",\r\n \"version\": \"2.3.0\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"rst-docstrings\",\r\n \"version\": \"0.2.3\"\r\n },\r\n {\r\n \"is_local\": false,\r\n \"plugin\": \"wemake_python_styleguide\",\r\n \"version\": \"0.16.0\"\r\n }\r\n ],\r\n \"version\": \"3.9.2\"\r\n}\r\n\r\n### pip information\r\n\r\npip 21.3.1\r\n\r\n### OS information\r\n\r\nmacOS 
Monterey\r\nVersion 12.0.1\n", "before_files": [{"content": "import ast\nfrom typing import List, Union\n\nfrom wemake_python_styleguide.logic import nodes\nfrom wemake_python_styleguide.logic.naming import access\n\n_VarDefinition = Union[ast.AST, ast.expr]\n_LocalVariable = Union[ast.Name, ast.ExceptHandler]\n\n\ndef get_variable_name(node: _LocalVariable) -> str:\n \"\"\"Used to get variable names from all definitions.\"\"\"\n if isinstance(node, ast.Name):\n return node.id\n return getattr(node, 'name', '')\n\n\ndef does_shadow_builtin(node: ast.AST) -> bool:\n \"\"\"\n We allow attributes and class-level builtin overrides.\n\n Like: ``self.list = []`` or ``def map(self, function):``\n\n Why?\n Because they cannot harm you since they do not shadow the real builtin.\n \"\"\"\n return (\n not isinstance(node, ast.Attribute) and\n not isinstance(nodes.get_context(node), ast.ClassDef)\n )\n\n\ndef is_valid_block_variable_definition(node: _VarDefinition) -> bool:\n \"\"\"Is used to check either block variables are correctly defined.\"\"\"\n if isinstance(node, ast.Tuple):\n return all(\n is_valid_block_variable_definition(var_definition)\n for var_definition in node.elts\n )\n return _is_valid_single(node)\n\n\ndef is_valid_unpacking_target(target: ast.expr) -> bool:\n \"\"\"Checks if unpacking target is correct.\"\"\"\n if isinstance(target, ast.Tuple):\n return all(\n _is_valid_single(element)\n for element in target.elts\n )\n return _is_valid_single(target)\n\n\ndef _is_valid_single(node: _VarDefinition) -> bool:\n return (\n isinstance(node, ast.Name) or\n isinstance(node, ast.Starred) and isinstance(node.value, ast.Name)\n )\n\n\ndef is_getting_element_by_unpacking(targets: List[ast.expr]) -> bool:\n \"\"\"Checks if unpacking targets used to get first or last element.\"\"\"\n if len(targets) != 2:\n return False\n first_item = (\n isinstance(targets[0], ast.Name) and\n isinstance(targets[1], ast.Starred) and\n _is_unused_variable_name(targets[1].value)\n )\n last_item = (\n isinstance(targets[1], ast.Name) and\n isinstance(targets[0], ast.Starred) and\n _is_unused_variable_name(targets[0].value)\n )\n return first_item or last_item\n\n\ndef _is_unused_variable_name(node: ast.expr) -> bool:\n return isinstance(node, ast.Name) and access.looks_like_unused(node.id)\n", "path": "wemake_python_styleguide/logic/tree/variables.py"}]}
num_tokens_prompt: 2,154
num_tokens_diff: 175
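The variables.py change captured in the row above only drops the `isinstance(..., ast.Name)` checks, so the non-starred assignment target may now be an attribute or subscript as well. The snippet below is a rough, self-contained sketch of the patterns involved; `get_address` and the assignment targets are invented for illustration, and the real checker additionally verifies that the starred name looks unused (`_`).

```python
import ast

SOURCE = """
name, *_ = get_address()
self.name, *_ = get_address()
names[0], *_ = get_address()
"""


def looks_like_first_element_unpacking(elts):
    # Two targets, one of them a starred throw-away name such as *_,
    # regardless of whether the other target is a Name, Attribute or Subscript.
    if len(elts) != 2:
        return False
    starred = [el for el in elts if isinstance(el, ast.Starred)]
    return len(starred) == 1 and isinstance(starred[0].value, ast.Name)


for stmt in ast.parse(SOURCE).body:
    if isinstance(stmt, ast.Assign) and isinstance(stmt.targets[0], ast.Tuple):
        print(looks_like_first_element_unpacking(stmt.targets[0].elts))  # prints True three times
```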
problem_id: gh_patches_debug_212
source: rasdani/github-patches
task_type: git_diff
in_source_id: pulp__pulpcore-3462
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Database errors raised when importing content **Version** Main pulpcore branch. The issue arose after merging the labels refractor work (https://github.com/pulp/pulpcore/commit/4e25949176d72c5dbe1c7623a9c47d253a18b085) . Reproducible in pulp_file and pulp_rpm. **Describe the bug** ``` pulp [d32341b1-78b2-44da-b43d-e51121df9e95]: pulpcore.tasking.pulpcore_worker:INFO: Task 4c2b456b-d9a8-4238-bb45-7b63f403229c failed (Unexpected end of string LINE 1: ...le.file', '365f08db-ac00-4e21-8abf-af0f047064cd', '{}', '', ... ^ ) pulp [d32341b1-78b2-44da-b43d-e51121df9e95]: pulpcore.tasking.pulpcore_worker:INFO: File "/home/vagrant/devel/pulpcore/pulpcore/tasking/pulpcore_worker.py", line 444, in _perform_task result = func(*args, **kwargs) File "/home/vagrant/devel/pulpcore/pulpcore/app/tasks/importer.py", line 236, in import_repository_version for a_result in _import_file(os.path.join(rv_path, filename), res_class, retry=True): File "/home/vagrant/devel/pulpcore/pulpcore/app/tasks/importer.py", line 138, in _import_file a_result = resource.import_data(data, raise_errors=True) File "/usr/local/lib/pulp/lib64/python3.10/site-packages/import_export/resources.py", line 819, in import_data return self.import_data_inner( File "/usr/local/lib/pulp/lib64/python3.10/site-packages/import_export/resources.py", line 871, in import_data_inner raise row_result.errors[-1].error File "/usr/local/lib/pulp/lib64/python3.10/site-packages/import_export/resources.py", line 743, in import_row self.save_instance(instance, new, using_transactions, dry_run) File "/usr/local/lib/pulp/lib64/python3.10/site-packages/import_export/resources.py", line 500, in save_instance instance.save() File "/home/vagrant/devel/pulpcore/pulpcore/app/models/repository.py", line 95, in save super().save(*args, **kwargs) File "/home/vagrant/devel/pulpcore/pulpcore/app/models/base.py", line 203, in save return super().save(*args, **kwargs) File "/usr/lib64/python3.10/contextlib.py", line 79, in inner return func(*args, **kwds) File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django_lifecycle/mixins.py", line 169, in save save(*args, **kwargs) File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/base.py", line 739, in save self.save_base(using=using, force_insert=force_insert, File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/base.py", line 775, in save_base parent_inserted = self._save_parents(cls, using, update_fields) File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/base.py", line 804, in _save_parents updated = self._save_table( File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/base.py", line 881, in _save_table results = self._do_insert(cls._base_manager, using, fields, returning_fields, raw) File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/base.py", line 919, in _do_insert return manager._insert( File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/manager.py", line 85, in manager_method return getattr(self.get_queryset(), name)(*args, **kwargs) File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py", line 1270, in _insert return query.get_compiler(using=using).execute_sql(returning_fields) File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/sql/compiler.py", line 1416, in execute_sql cursor.execute(sql, params) File 
"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 66, in execute return self._execute_with_wrappers(sql, params, many=False, executor=self._execute) File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 75, in _execute_with_wrappers return executor(sql, params, many, context) File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 79, in _execute with self.db.wrap_database_errors: File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/utils.py", line 90, in __exit__ raise dj_exc_value.with_traceback(traceback) from exc_value File "/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py", line 84, in _execute return self.cursor.execute(sql, params) ``` </issue> <code> [start of pulpcore/app/modelresource.py] 1 from import_export import fields 2 from import_export.widgets import ForeignKeyWidget 3 from logging import getLogger 4 5 from pulpcore.app.models.content import ( 6 Artifact, 7 Content, 8 ContentArtifact, 9 ) 10 from pulpcore.app.models.repository import Repository 11 from pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS 12 from pulpcore.plugin.importexport import QueryModelResource 13 14 15 log = getLogger(__name__) 16 17 18 # 19 # Artifact and Repository are different from other import-export entities, in that they are not 20 # repo-version-specific. 21 # 22 class ArtifactResource(QueryModelResource): 23 """Resource for import/export of artifacts.""" 24 25 def before_import_row(self, row, **kwargs): 26 """ 27 Sets digests to None if they are blank strings. 28 29 Args: 30 row (tablib.Dataset row): incoming import-row representing a single Variant. 31 kwargs: args passed along from the import() call. 32 33 """ 34 # the export converts None to blank strings but sha384 and sha512 have unique constraints 35 # that get triggered if they are blank. convert checksums back into None if they are blank. 36 for checksum in ALL_KNOWN_CONTENT_CHECKSUMS: 37 if row[checksum] == "": 38 row[checksum] = None 39 40 class Meta: 41 model = Artifact 42 exclude = ( 43 "pulp_id", 44 "pulp_created", 45 "pulp_last_updated", 46 ) 47 import_id_fields = ("sha256",) 48 49 50 class RepositoryResource(QueryModelResource): 51 class Meta: 52 model = Repository 53 import_id_fields = ("name",) 54 exclude = ( 55 "pulp_id", 56 "pulp_created", 57 "pulp_last_updated", 58 "content", 59 "next_version", 60 "repository_ptr", 61 "remote", 62 ) 63 64 65 class ContentArtifactResource(QueryModelResource): 66 """ 67 Handles import/export of the ContentArtifact model. 68 69 ContentArtifact is different from other import-export entities because it has no 'natural key' 70 other than a pulp_id, which aren't shared across instances. We do some magic to link up 71 ContentArtifacts to their matching (already-imported) Content. 72 73 Some plugin-models have sub-repositories. We take advantage of the content-mapping 74 machinery to account for those contentartifacts as well. 75 """ 76 77 artifact = fields.Field( 78 column_name="artifact", attribute="artifact", widget=ForeignKeyWidget(Artifact, "sha256") 79 ) 80 81 def __init__(self, repo_version=None, content_mapping=None): 82 self.content_mapping = content_mapping 83 super().__init__(repo_version) 84 85 def before_import_row(self, row, **kwargs): 86 """ 87 Fixes the content-ptr of an incoming content-artifact row at import time. 
88 89 Finds the 'original uuid' of the Content for this row, looks it up as the 90 'upstream_id' of imported Content, and then replaces the Content-pk with its 91 (new) uuid. 92 93 Args: 94 row (tablib.Dataset row): incoming import-row representing a single ContentArtifact. 95 kwargs: args passed along from the import() call. 96 97 Returns: 98 (tablib.Dataset row): row that now points to the new downstream uuid for its content. 99 """ 100 101 linked_content = Content.objects.get(upstream_id=row["content"]) 102 row["content"] = str(linked_content.pulp_id) 103 104 def set_up_queryset(self): 105 vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content) 106 if self.content_mapping: 107 all_content = [] 108 for content_ids in self.content_mapping.values(): 109 all_content.extend(content_ids) 110 vers_content = vers_content.union( 111 ContentArtifact.objects.filter(content__in=all_content) 112 ) 113 return vers_content.order_by("content", "relative_path") 114 115 class Meta: 116 model = ContentArtifact 117 import_id_fields = ( 118 "content", 119 "relative_path", 120 ) 121 exclude = ( 122 "pulp_created", 123 "pulp_last_updated", 124 "_artifacts", 125 "pulp_id", 126 ) 127 [end of pulpcore/app/modelresource.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pulpcore/app/modelresource.py b/pulpcore/app/modelresource.py --- a/pulpcore/app/modelresource.py +++ b/pulpcore/app/modelresource.py @@ -59,6 +59,7 @@ "next_version", "repository_ptr", "remote", + "pulp_labels", )
{"golden_diff": "diff --git a/pulpcore/app/modelresource.py b/pulpcore/app/modelresource.py\n--- a/pulpcore/app/modelresource.py\n+++ b/pulpcore/app/modelresource.py\n@@ -59,6 +59,7 @@\n \"next_version\",\n \"repository_ptr\",\n \"remote\",\n+ \"pulp_labels\",\n )\n", "issue": "Database errors raised when importing content\n**Version**\r\nMain pulpcore branch. The issue arose after merging the labels refractor work (https://github.com/pulp/pulpcore/commit/4e25949176d72c5dbe1c7623a9c47d253a18b085) . \r\n\r\nReproducible in pulp_file and pulp_rpm. \r\n\r\n**Describe the bug**\r\n```\r\npulp [d32341b1-78b2-44da-b43d-e51121df9e95]: pulpcore.tasking.pulpcore_worker:INFO: Task 4c2b456b-d9a8-4238-bb45-7b63f403229c failed (Unexpected end of string\r\nLINE 1: ...le.file', '365f08db-ac00-4e21-8abf-af0f047064cd', '{}', '', ...\r\n ^\r\n)\r\npulp [d32341b1-78b2-44da-b43d-e51121df9e95]: pulpcore.tasking.pulpcore_worker:INFO: File \"/home/vagrant/devel/pulpcore/pulpcore/tasking/pulpcore_worker.py\", line 444, in _perform_task\r\n result = func(*args, **kwargs)\r\n File \"/home/vagrant/devel/pulpcore/pulpcore/app/tasks/importer.py\", line 236, in import_repository_version\r\n for a_result in _import_file(os.path.join(rv_path, filename), res_class, retry=True):\r\n File \"/home/vagrant/devel/pulpcore/pulpcore/app/tasks/importer.py\", line 138, in _import_file\r\n a_result = resource.import_data(data, raise_errors=True)\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/import_export/resources.py\", line 819, in import_data\r\n return self.import_data_inner(\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/import_export/resources.py\", line 871, in import_data_inner\r\n raise row_result.errors[-1].error\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/import_export/resources.py\", line 743, in import_row\r\n self.save_instance(instance, new, using_transactions, dry_run)\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/import_export/resources.py\", line 500, in save_instance\r\n instance.save()\r\n File \"/home/vagrant/devel/pulpcore/pulpcore/app/models/repository.py\", line 95, in save\r\n super().save(*args, **kwargs)\r\n File \"/home/vagrant/devel/pulpcore/pulpcore/app/models/base.py\", line 203, in save\r\n return super().save(*args, **kwargs)\r\n File \"/usr/lib64/python3.10/contextlib.py\", line 79, in inner\r\n return func(*args, **kwds)\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django_lifecycle/mixins.py\", line 169, in save\r\n save(*args, **kwargs)\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/base.py\", line 739, in save\r\n self.save_base(using=using, force_insert=force_insert,\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/base.py\", line 775, in save_base\r\n parent_inserted = self._save_parents(cls, using, update_fields)\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/base.py\", line 804, in _save_parents\r\n updated = self._save_table(\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/base.py\", line 881, in _save_table\r\n results = self._do_insert(cls._base_manager, using, fields, returning_fields, raw)\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/base.py\", line 919, in _do_insert\r\n return manager._insert(\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/manager.py\", line 85, in manager_method\r\n return 
getattr(self.get_queryset(), name)(*args, **kwargs)\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/query.py\", line 1270, in _insert\r\n return query.get_compiler(using=using).execute_sql(returning_fields)\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/models/sql/compiler.py\", line 1416, in execute_sql\r\n cursor.execute(sql, params)\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py\", line 66, in execute\r\n return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py\", line 75, in _execute_with_wrappers\r\n return executor(sql, params, many, context)\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py\", line 79, in _execute\r\n with self.db.wrap_database_errors:\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/utils.py\", line 90, in __exit__\r\n raise dj_exc_value.with_traceback(traceback) from exc_value\r\n File \"/usr/local/lib/pulp/lib64/python3.10/site-packages/django/db/backends/utils.py\", line 84, in _execute\r\n return self.cursor.execute(sql, params)\r\n```\n", "before_files": [{"content": "from import_export import fields\nfrom import_export.widgets import ForeignKeyWidget\nfrom logging import getLogger\n\nfrom pulpcore.app.models.content import (\n Artifact,\n Content,\n ContentArtifact,\n)\nfrom pulpcore.app.models.repository import Repository\nfrom pulpcore.constants import ALL_KNOWN_CONTENT_CHECKSUMS\nfrom pulpcore.plugin.importexport import QueryModelResource\n\n\nlog = getLogger(__name__)\n\n\n#\n# Artifact and Repository are different from other import-export entities, in that they are not\n# repo-version-specific.\n#\nclass ArtifactResource(QueryModelResource):\n \"\"\"Resource for import/export of artifacts.\"\"\"\n\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Sets digests to None if they are blank strings.\n\n Args:\n row (tablib.Dataset row): incoming import-row representing a single Variant.\n kwargs: args passed along from the import() call.\n\n \"\"\"\n # the export converts None to blank strings but sha384 and sha512 have unique constraints\n # that get triggered if they are blank. convert checksums back into None if they are blank.\n for checksum in ALL_KNOWN_CONTENT_CHECKSUMS:\n if row[checksum] == \"\":\n row[checksum] = None\n\n class Meta:\n model = Artifact\n exclude = (\n \"pulp_id\",\n \"pulp_created\",\n \"pulp_last_updated\",\n )\n import_id_fields = (\"sha256\",)\n\n\nclass RepositoryResource(QueryModelResource):\n class Meta:\n model = Repository\n import_id_fields = (\"name\",)\n exclude = (\n \"pulp_id\",\n \"pulp_created\",\n \"pulp_last_updated\",\n \"content\",\n \"next_version\",\n \"repository_ptr\",\n \"remote\",\n )\n\n\nclass ContentArtifactResource(QueryModelResource):\n \"\"\"\n Handles import/export of the ContentArtifact model.\n\n ContentArtifact is different from other import-export entities because it has no 'natural key'\n other than a pulp_id, which aren't shared across instances. We do some magic to link up\n ContentArtifacts to their matching (already-imported) Content.\n\n Some plugin-models have sub-repositories. 
We take advantage of the content-mapping\n machinery to account for those contentartifacts as well.\n \"\"\"\n\n artifact = fields.Field(\n column_name=\"artifact\", attribute=\"artifact\", widget=ForeignKeyWidget(Artifact, \"sha256\")\n )\n\n def __init__(self, repo_version=None, content_mapping=None):\n self.content_mapping = content_mapping\n super().__init__(repo_version)\n\n def before_import_row(self, row, **kwargs):\n \"\"\"\n Fixes the content-ptr of an incoming content-artifact row at import time.\n\n Finds the 'original uuid' of the Content for this row, looks it up as the\n 'upstream_id' of imported Content, and then replaces the Content-pk with its\n (new) uuid.\n\n Args:\n row (tablib.Dataset row): incoming import-row representing a single ContentArtifact.\n kwargs: args passed along from the import() call.\n\n Returns:\n (tablib.Dataset row): row that now points to the new downstream uuid for its content.\n \"\"\"\n\n linked_content = Content.objects.get(upstream_id=row[\"content\"])\n row[\"content\"] = str(linked_content.pulp_id)\n\n def set_up_queryset(self):\n vers_content = ContentArtifact.objects.filter(content__in=self.repo_version.content)\n if self.content_mapping:\n all_content = []\n for content_ids in self.content_mapping.values():\n all_content.extend(content_ids)\n vers_content = vers_content.union(\n ContentArtifact.objects.filter(content__in=all_content)\n )\n return vers_content.order_by(\"content\", \"relative_path\")\n\n class Meta:\n model = ContentArtifact\n import_id_fields = (\n \"content\",\n \"relative_path\",\n )\n exclude = (\n \"pulp_created\",\n \"pulp_last_updated\",\n \"_artifacts\",\n \"pulp_id\",\n )\n", "path": "pulpcore/app/modelresource.py"}]}
num_tokens_prompt: 3,043
num_tokens_diff: 75
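The one-line fix in the row above works because django-import-export builds a resource's column list from its Meta options: columns listed in `exclude` are dropped from the field list, so their exported values are never set on the instance during import. Below is a minimal, generic sketch of that mechanism; the `Book` model and field names are placeholders for illustration, not Pulp code.

```python
from import_export import resources

from myapp.models import Book  # placeholder model, not part of Pulp


class BookResource(resources.ModelResource):
    class Meta:
        model = Book
        import_id_fields = ("isbn",)
        # Fields listed here are omitted from both export and import, so
        # values the importer cannot parse (like the labels value implicated
        # in the traceback above) are never written back to the model.
        exclude = ("id", "internal_notes")
```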
problem_id: gh_patches_debug_24667
source: rasdani/github-patches
task_type: git_diff
in_source_id: saleor__saleor-2665
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Implement product types section in dashboard Blocked by #2679 </issue> <code> [start of saleor/core/__init__.py] 1 from django.conf import settings 2 from django.core.checks import Warning, register 3 from django.utils.translation import pgettext_lazy 4 5 TOKEN_PATTERN = ('(?P<token>[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}' 6 '-[0-9a-z]{12})') 7 8 9 @register() 10 def check_session_caching(app_configs, **kwargs): # pragma: no cover 11 errors = [] 12 cached_engines = { 13 'django.contrib.sessions.backends.cache', 14 'django.contrib.sessions.backends.cached_db'} 15 if ('locmem' in settings.CACHES['default']['BACKEND'] and 16 settings.SESSION_ENGINE in cached_engines): 17 errors.append( 18 Warning( 19 'Session caching cannot work with locmem backend', 20 'User sessions need to be globally shared, use a cache server' 21 ' like Redis.', 22 'saleor.W001')) 23 return errors 24 25 26 class TaxRateType: 27 ACCOMODATION = 'accomodation' 28 ADMISSION_TO_CULTURAL_EVENTS = 'admission to cultural events' 29 ADMISSION_TO_ENTERAINMENT_EVENTS = 'admission to entertainment events' 30 ADMISSION_TO_SPORTING_EVENTS = 'admission to sporting events' 31 ADVERTISING = 'advertising' 32 AGRICULTURAL_SUPPLIES = 'agricultural supplies' 33 BABY_FOODSTUFFS = 'baby foodstuffs' 34 BIKES = 'bikes' 35 BOOKS = 'books' 36 CHILDRENDS_CLOTHING = 'childrens clothing' 37 DOMESTIC_FUEL = 'domestic fuel' 38 DOMESTIC_SERVICES = 'domestic services' 39 E_BOOKS = 'e-books' 40 FOODSTUFFS = 'foodstuffs' 41 HOTELS = 'hotels' 42 MEDICAL = 'medical' 43 NEWSPAPERS = 'newspapers' 44 PASSENGER_TRANSPORT = 'passenger transport' 45 PHARMACEUTICALS = 'pharmaceuticals' 46 PROPERTY_RENOVATIONS = 'property renovations' 47 RESTAURANTS = 'restaurants' 48 SOCIAL_HOUSING = 'social housing' 49 STANDARD = 'standard' 50 WATER = 'water' 51 WINE = 'wine' 52 53 CHOICES = ( 54 (ACCOMODATION, pgettext_lazy('VAT rate type', 'accommodation')), 55 (ADMISSION_TO_CULTURAL_EVENTS, pgettext_lazy( 56 'VAT rate type', 'admission to cultural events')), 57 (ADMISSION_TO_ENTERAINMENT_EVENTS, pgettext_lazy( 58 'VAT rate type', 'admission to entertainment events')), 59 (ADMISSION_TO_SPORTING_EVENTS, pgettext_lazy( 60 'VAT rate type', 'admission to sporting events')), 61 (ADVERTISING, pgettext_lazy('VAT rate type', 'advertising')), 62 (AGRICULTURAL_SUPPLIES, pgettext_lazy( 63 'VAT rate type', 'agricultural supplies')), 64 (BABY_FOODSTUFFS, pgettext_lazy('VAT rate type', 'baby foodstuffs')), 65 (BIKES, pgettext_lazy('VAT rate type', 'bikes')), 66 (BOOKS, pgettext_lazy('VAT rate type', 'books')), 67 (CHILDRENDS_CLOTHING, pgettext_lazy( 68 'VAT rate type', 'childrens clothing')), 69 (DOMESTIC_FUEL, pgettext_lazy('VAT rate type', 'domestic fuel')), 70 (DOMESTIC_SERVICES, pgettext_lazy( 71 'VAT rate type', 'domestic services')), 72 (E_BOOKS, pgettext_lazy('VAT rate type', 'e-books')), 73 (FOODSTUFFS, pgettext_lazy('VAT rate type', 'foodstuffs')), 74 (HOTELS, pgettext_lazy('VAT rate type', 'hotels')), 75 (MEDICAL, pgettext_lazy('VAT rate type', 'medical')), 76 (NEWSPAPERS, pgettext_lazy('VAT rate type', 'newspapers')), 77 (PASSENGER_TRANSPORT, pgettext_lazy( 78 'VAT rate type', 'passenger transport')), 79 (PHARMACEUTICALS, pgettext_lazy( 80 'VAT rate type', 'pharmaceuticals')), 81 (PROPERTY_RENOVATIONS, pgettext_lazy( 82 'VAT rate type', 'property renovations')), 83 (RESTAURANTS, pgettext_lazy('VAT rate type', 'restaurants')), 84 (SOCIAL_HOUSING, pgettext_lazy('VAT 
rate type', 'social housing')), 85 (STANDARD, pgettext_lazy('VAT rate type', 'standard')), 86 (WATER, pgettext_lazy('VAT rate type', 'water'))) 87 [end of saleor/core/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/core/__init__.py b/saleor/core/__init__.py --- a/saleor/core/__init__.py +++ b/saleor/core/__init__.py @@ -26,7 +26,7 @@ class TaxRateType: ACCOMODATION = 'accomodation' ADMISSION_TO_CULTURAL_EVENTS = 'admission to cultural events' - ADMISSION_TO_ENTERAINMENT_EVENTS = 'admission to entertainment events' + ADMISSION_TO_ENTERTAINMENT_EVENTS = 'admission to entertainment events' ADMISSION_TO_SPORTING_EVENTS = 'admission to sporting events' ADVERTISING = 'advertising' AGRICULTURAL_SUPPLIES = 'agricultural supplies' @@ -54,7 +54,7 @@ (ACCOMODATION, pgettext_lazy('VAT rate type', 'accommodation')), (ADMISSION_TO_CULTURAL_EVENTS, pgettext_lazy( 'VAT rate type', 'admission to cultural events')), - (ADMISSION_TO_ENTERAINMENT_EVENTS, pgettext_lazy( + (ADMISSION_TO_ENTERTAINMENT_EVENTS, pgettext_lazy( 'VAT rate type', 'admission to entertainment events')), (ADMISSION_TO_SPORTING_EVENTS, pgettext_lazy( 'VAT rate type', 'admission to sporting events')),
{"golden_diff": "diff --git a/saleor/core/__init__.py b/saleor/core/__init__.py\n--- a/saleor/core/__init__.py\n+++ b/saleor/core/__init__.py\n@@ -26,7 +26,7 @@\n class TaxRateType:\n ACCOMODATION = 'accomodation'\n ADMISSION_TO_CULTURAL_EVENTS = 'admission to cultural events'\n- ADMISSION_TO_ENTERAINMENT_EVENTS = 'admission to entertainment events'\n+ ADMISSION_TO_ENTERTAINMENT_EVENTS = 'admission to entertainment events'\n ADMISSION_TO_SPORTING_EVENTS = 'admission to sporting events'\n ADVERTISING = 'advertising'\n AGRICULTURAL_SUPPLIES = 'agricultural supplies'\n@@ -54,7 +54,7 @@\n (ACCOMODATION, pgettext_lazy('VAT rate type', 'accommodation')),\n (ADMISSION_TO_CULTURAL_EVENTS, pgettext_lazy(\n 'VAT rate type', 'admission to cultural events')),\n- (ADMISSION_TO_ENTERAINMENT_EVENTS, pgettext_lazy(\n+ (ADMISSION_TO_ENTERTAINMENT_EVENTS, pgettext_lazy(\n 'VAT rate type', 'admission to entertainment events')),\n (ADMISSION_TO_SPORTING_EVENTS, pgettext_lazy(\n 'VAT rate type', 'admission to sporting events')),\n", "issue": "Implement product types section in dashboard\nBlocked by #2679 \n", "before_files": [{"content": "from django.conf import settings\nfrom django.core.checks import Warning, register\nfrom django.utils.translation import pgettext_lazy\n\nTOKEN_PATTERN = ('(?P<token>[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}'\n '-[0-9a-z]{12})')\n\n\n@register()\ndef check_session_caching(app_configs, **kwargs): # pragma: no cover\n errors = []\n cached_engines = {\n 'django.contrib.sessions.backends.cache',\n 'django.contrib.sessions.backends.cached_db'}\n if ('locmem' in settings.CACHES['default']['BACKEND'] and\n settings.SESSION_ENGINE in cached_engines):\n errors.append(\n Warning(\n 'Session caching cannot work with locmem backend',\n 'User sessions need to be globally shared, use a cache server'\n ' like Redis.',\n 'saleor.W001'))\n return errors\n\n\nclass TaxRateType:\n ACCOMODATION = 'accomodation'\n ADMISSION_TO_CULTURAL_EVENTS = 'admission to cultural events'\n ADMISSION_TO_ENTERAINMENT_EVENTS = 'admission to entertainment events'\n ADMISSION_TO_SPORTING_EVENTS = 'admission to sporting events'\n ADVERTISING = 'advertising'\n AGRICULTURAL_SUPPLIES = 'agricultural supplies'\n BABY_FOODSTUFFS = 'baby foodstuffs'\n BIKES = 'bikes'\n BOOKS = 'books'\n CHILDRENDS_CLOTHING = 'childrens clothing'\n DOMESTIC_FUEL = 'domestic fuel'\n DOMESTIC_SERVICES = 'domestic services'\n E_BOOKS = 'e-books'\n FOODSTUFFS = 'foodstuffs'\n HOTELS = 'hotels'\n MEDICAL = 'medical'\n NEWSPAPERS = 'newspapers'\n PASSENGER_TRANSPORT = 'passenger transport'\n PHARMACEUTICALS = 'pharmaceuticals'\n PROPERTY_RENOVATIONS = 'property renovations'\n RESTAURANTS = 'restaurants'\n SOCIAL_HOUSING = 'social housing'\n STANDARD = 'standard'\n WATER = 'water'\n WINE = 'wine'\n\n CHOICES = (\n (ACCOMODATION, pgettext_lazy('VAT rate type', 'accommodation')),\n (ADMISSION_TO_CULTURAL_EVENTS, pgettext_lazy(\n 'VAT rate type', 'admission to cultural events')),\n (ADMISSION_TO_ENTERAINMENT_EVENTS, pgettext_lazy(\n 'VAT rate type', 'admission to entertainment events')),\n (ADMISSION_TO_SPORTING_EVENTS, pgettext_lazy(\n 'VAT rate type', 'admission to sporting events')),\n (ADVERTISING, pgettext_lazy('VAT rate type', 'advertising')),\n (AGRICULTURAL_SUPPLIES, pgettext_lazy(\n 'VAT rate type', 'agricultural supplies')),\n (BABY_FOODSTUFFS, pgettext_lazy('VAT rate type', 'baby foodstuffs')),\n (BIKES, pgettext_lazy('VAT rate type', 'bikes')),\n (BOOKS, pgettext_lazy('VAT rate type', 'books')),\n (CHILDRENDS_CLOTHING, 
pgettext_lazy(\n 'VAT rate type', 'childrens clothing')),\n (DOMESTIC_FUEL, pgettext_lazy('VAT rate type', 'domestic fuel')),\n (DOMESTIC_SERVICES, pgettext_lazy(\n 'VAT rate type', 'domestic services')),\n (E_BOOKS, pgettext_lazy('VAT rate type', 'e-books')),\n (FOODSTUFFS, pgettext_lazy('VAT rate type', 'foodstuffs')),\n (HOTELS, pgettext_lazy('VAT rate type', 'hotels')),\n (MEDICAL, pgettext_lazy('VAT rate type', 'medical')),\n (NEWSPAPERS, pgettext_lazy('VAT rate type', 'newspapers')),\n (PASSENGER_TRANSPORT, pgettext_lazy(\n 'VAT rate type', 'passenger transport')),\n (PHARMACEUTICALS, pgettext_lazy(\n 'VAT rate type', 'pharmaceuticals')),\n (PROPERTY_RENOVATIONS, pgettext_lazy(\n 'VAT rate type', 'property renovations')),\n (RESTAURANTS, pgettext_lazy('VAT rate type', 'restaurants')),\n (SOCIAL_HOUSING, pgettext_lazy('VAT rate type', 'social housing')),\n (STANDARD, pgettext_lazy('VAT rate type', 'standard')),\n (WATER, pgettext_lazy('VAT rate type', 'water')))\n", "path": "saleor/core/__init__.py"}]}
1,752
301
gh_patches_debug_32289
rasdani/github-patches
git_diff
translate__pootle-5837
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Plural strings not marked as that While reviewing strings for 2.8 release, those are not in plural forms: - http://pootle.locamotion.org/ar/pootle/translate/pootle.po#unit=1826598 - http://pootle.locamotion.org/ar/pootle/translate/pootle.po#unit=1826597 - http://pootle.locamotion.org/ar/pootle/translate/pootle.po#unit=1826594 - http://pootle.locamotion.org/ar/pootle/translate/pootle.po#unit=1826584 </issue> <code> [start of pootle/apps/pootle_language/views.py] 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 from django.contrib import messages 10 from django.http import Http404 11 from django.shortcuts import redirect, render 12 from django.urls import reverse 13 from django.utils.functional import cached_property 14 from django.utils.lru_cache import lru_cache 15 from django.utils.safestring import mark_safe 16 17 from pootle.core.browser import make_project_item 18 from pootle.core.decorators import ( 19 get_object_or_404, get_path_obj, permission_required) 20 from pootle.core.views import PootleBrowseView, PootleTranslateView 21 from pootle.core.views.admin import PootleFormView 22 from pootle.core.views.decorators import requires_permission, set_permissions 23 from pootle.core.views.formtable import Formtable 24 from pootle.core.views.mixins import PootleJSONMixin 25 from pootle.i18n import formatter 26 from pootle.i18n.gettext import tr_lang, ugettext_lazy as _ 27 from pootle_misc.util import cmp_by_last_activity 28 from pootle_store.constants import STATES_MAP 29 30 from .apps import PootleLanguageConfig 31 from .forms import ( 32 LanguageSpecialCharsForm, LanguageSuggestionAdminForm, LanguageTeamAdminForm, 33 LanguageTeamNewMemberSearchForm) 34 from .models import Language 35 36 37 class LanguageMixin(object): 38 ns = "pootle.language" 39 sw_version = PootleLanguageConfig.version 40 model = Language 41 browse_url_path = "pootle-language-browse" 42 translate_url_path = "pootle-language-translate" 43 template_extends = 'languages/base.html' 44 45 @property 46 def language(self): 47 return self.object 48 49 @property 50 def permission_context(self): 51 return self.get_object().directory 52 53 @property 54 def url_kwargs(self): 55 return {"language_code": self.object.code} 56 57 @lru_cache() 58 def get_object(self): 59 lang = Language.get_canonical(self.kwargs["language_code"]) 60 if lang is None: 61 raise Http404 62 return lang 63 64 def get(self, *args, **kwargs): 65 self.object = self.get_object() 66 if self.object.code != kwargs["language_code"]: 67 return redirect( 68 self.url_pattern_name, 69 self.object.code, 70 permanent=True) 71 return super(LanguageMixin, self).get(*args, **kwargs) 72 73 74 class LanguageBrowseView(LanguageMixin, PootleBrowseView): 75 url_pattern_name = "pootle-language-browse" 76 view_name = "language" 77 78 @cached_property 79 def object_children(self): 80 items = [make_project_item(tp) 81 for tp in self.object.get_children_for_user(self.request.user)] 82 items = self.add_child_stats(items) 83 items.sort(cmp_by_last_activity) 84 return items 85 86 @property 87 def language(self): 88 return { 89 'code': self.object.code, 90 'name': tr_lang(self.object.fullname)} 91 92 def get(self, *args, **kwargs): 93 response 
= super(LanguageBrowseView, self).get(*args, **kwargs) 94 response.set_cookie('pootle-language', self.object.code) 95 return response 96 97 98 class LanguageTranslateView(LanguageMixin, PootleTranslateView): 99 url_pattern_name = "pootle-language-translate" 100 101 102 @get_path_obj 103 @permission_required('administrate') 104 def language_characters_admin(request, language): 105 form = LanguageSpecialCharsForm(request.POST 106 if request.method == 'POST' 107 else None, 108 instance=language) 109 if form.is_valid(): 110 form.save() 111 return redirect('pootle-language-browse', language.code) 112 113 ctx = { 114 'page': 'admin-characters', 115 116 'browse_url': reverse('pootle-language-browse', kwargs={ 117 'language_code': language.code, 118 }), 119 'translate_url': reverse('pootle-language-translate', kwargs={ 120 'language_code': language.code, 121 }), 122 123 'language': language, 124 'directory': language.directory, 125 'form': form, 126 } 127 128 return render(request, 'languages/admin/characters.html', ctx) 129 130 131 class SuggestionFormtable(Formtable): 132 row_field = "suggestions" 133 filters_template = "languages/admin/includes/suggestions_header.html" 134 135 @property 136 def messages(self): 137 return self.kwargs.get("messages", []) 138 139 140 class SuggestionDisplay(object): 141 142 def __init__(self, suggestion): 143 self.__suggestion__ = suggestion 144 145 @property 146 def unit(self): 147 return self.__suggestion__.unit.source_f 148 149 @property 150 def project(self): 151 tp = self.__suggestion__.unit.store.translation_project 152 return mark_safe( 153 "<a href='%s'>%s</a>" 154 % (tp.get_absolute_url(), 155 tp.project.code)) 156 157 @property 158 def unit_state(self): 159 return STATES_MAP[self.__suggestion__.unit.state] 160 161 @property 162 def unit_link(self): 163 return mark_safe( 164 "<a href='%s'>#%s</a>" 165 % (self.__suggestion__.unit.get_translate_url(), 166 self.__suggestion__.unit.id)) 167 168 def __getattr__(self, k): 169 try: 170 return getattr(self.__suggestion__, k) 171 except AttributeError: 172 return self.__getattribute__(k) 173 174 175 class PootleLanguageAdminFormView(PootleFormView): 176 177 @property 178 def permission_context(self): 179 return self.language.directory 180 181 @set_permissions 182 @requires_permission("administrate") 183 def dispatch(self, request, *args, **kwargs): 184 # get funky with the request 8/ 185 return super( 186 PootleLanguageAdminFormView, self).dispatch(request, *args, **kwargs) 187 188 @cached_property 189 def language(self): 190 return get_object_or_404( 191 Language.objects.select_related("directory"), 192 code=self.kwargs["language_code"]) 193 194 def get_form_kwargs(self): 195 kwargs = super(PootleLanguageAdminFormView, self).get_form_kwargs() 196 kwargs["language"] = self.language 197 return kwargs 198 199 @property 200 def success_kwargs(self): 201 return dict(language_code=self.language.code) 202 203 204 class LanguageSuggestionAdminView(PootleLanguageAdminFormView): 205 template_name = 'languages/admin/language_team_suggestions.html' 206 form_class = LanguageSuggestionAdminForm 207 success_url_pattern = "pootle-language-admin-suggestions" 208 formtable_columns = ( 209 _("Unit"), 210 _("State"), 211 _("Source"), 212 _("Suggestion"), 213 _("Suggested by"), 214 _("Suggested at"), 215 _("Project")) 216 217 @property 218 def default_form_kwargs(self): 219 return dict( 220 page_no=1, 221 results_per_page=10) 222 223 def add_success_message(self, form): 224 count = ( 225 form.fields["suggestions"].queryset.count() 
226 if form.cleaned_data["select_all"] 227 else len(form.cleaned_data["suggestions"])) 228 reject_and_notify = ( 229 form.cleaned_data["actions"] == "reject" 230 and form.cleaned_data["comment"]) 231 accept_and_notify = ( 232 form.cleaned_data["actions"] == "accept" 233 and form.cleaned_data["comment"]) 234 if reject_and_notify: 235 message = _( 236 "Rejected %s suggestions with comment. Users will be notified", 237 count) 238 elif accept_and_notify: 239 message = _( 240 "Accepted %s suggestions with comment. Users will be notified", 241 count) 242 elif form.cleaned_data["actions"] == "reject": 243 message = _("Rejected %s suggestions", count) 244 else: 245 message = _("Accepted %s suggestions", count) 246 messages.success(self.request, message) 247 248 def get_context_data(self, **kwargs): 249 context = super( 250 LanguageSuggestionAdminView, self).get_context_data(**kwargs) 251 context["page"] = "admin-suggestions" 252 context["language"] = self.language 253 form = context["form"] 254 form.is_valid() 255 batch = form.batch() 256 form.fields["suggestions"].choices = [ 257 (item.id, SuggestionDisplay(item)) 258 for item in 259 batch.object_list] 260 context["formtable"] = SuggestionFormtable( 261 form, 262 columns=self.formtable_columns, 263 page=batch, 264 messages=messages.get_messages(self.request)) 265 return context 266 267 def get_form_kwargs(self): 268 kwargs = super(LanguageSuggestionAdminView, self).get_form_kwargs() 269 if not self.request.POST: 270 kwargs["data"] = self.default_form_kwargs 271 kwargs["user"] = self.request.user 272 return kwargs 273 274 275 class LanguageTeamAdminFormView(PootleLanguageAdminFormView): 276 form_class = LanguageTeamAdminForm 277 template_name = "languages/admin/language_team.html" 278 success_url_pattern = "pootle-language-admin-team" 279 280 def get_context_data(self, **kwargs): 281 context = super(LanguageTeamAdminFormView, self).get_context_data(**kwargs) 282 form = context["form"] 283 context["tps"] = self.language.translationproject_set.exclude( 284 project__disabled=True) 285 stats = self.language.data_tool.get_stats( 286 include_children=False, 287 user=self.request.user) 288 keys = ("total", "critical", "incomplete", "translated", "fuzzy", 289 "untranslated") 290 for k in keys: 291 if k in stats: 292 stats[k + "_display"] = formatter.number(stats[k]) 293 context["stats"] = stats 294 context["suggestions"] = form.language_team.suggestions 295 suggestions_count = 0 296 if context["suggestions"]: 297 suggestions_count = context["suggestions"].count() 298 context["suggestions_display"] = formatter.number(suggestions_count) 299 context["language"] = self.language 300 context["page"] = "admin-team" 301 context["browse_url"] = reverse( 302 "pootle-language-browse", 303 kwargs=dict(language_code=self.language.code)) 304 context["translate_url"] = reverse( 305 "pootle-language-translate", 306 kwargs=dict(language_code=self.language.code)) 307 return context 308 309 310 class LanguageTeamAdminNewMembersJSON(PootleJSONMixin, PootleLanguageAdminFormView): 311 form_class = LanguageTeamNewMemberSearchForm 312 313 def get_context_data(self, **kwargs): 314 context = super( 315 LanguageTeamAdminNewMembersJSON, self).get_context_data(**kwargs) 316 form = context["form"] 317 return ( 318 dict(items=form.search()) 319 if form.is_valid() 320 else dict(items=[])) 321 322 def get_form_kwargs(self): 323 kwargs = super(LanguageTeamAdminNewMembersJSON, self).get_form_kwargs() 324 kwargs["data"] = self.request.GET 325 return kwargs 326 [end of 
pootle/apps/pootle_language/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pootle/apps/pootle_language/views.py b/pootle/apps/pootle_language/views.py --- a/pootle/apps/pootle_language/views.py +++ b/pootle/apps/pootle_language/views.py @@ -23,7 +23,7 @@ from pootle.core.views.formtable import Formtable from pootle.core.views.mixins import PootleJSONMixin from pootle.i18n import formatter -from pootle.i18n.gettext import tr_lang, ugettext_lazy as _ +from pootle.i18n.gettext import tr_lang, ugettext_lazy as _, ungettext_lazy from pootle_misc.util import cmp_by_last_activity from pootle_store.constants import STATES_MAP @@ -232,17 +232,25 @@ form.cleaned_data["actions"] == "accept" and form.cleaned_data["comment"]) if reject_and_notify: - message = _( + message = ungettext_lazy( + "Rejected %s suggestion with comment. User will be notified", "Rejected %s suggestions with comment. Users will be notified", count) elif accept_and_notify: - message = _( + message = ungettext_lazy( + "Accepted %s suggestion with comment. User will be notified", "Accepted %s suggestions with comment. Users will be notified", count) elif form.cleaned_data["actions"] == "reject": - message = _("Rejected %s suggestions", count) + message = ungettext_lazy( + "Rejected %s suggestion", + "Rejected %s suggestions", + count) else: - message = _("Accepted %s suggestions", count) + message = ungettext_lazy( + "Accepted %s suggestion", + "Accepted %s suggestions", + count) messages.success(self.request, message) def get_context_data(self, **kwargs):
{"golden_diff": "diff --git a/pootle/apps/pootle_language/views.py b/pootle/apps/pootle_language/views.py\n--- a/pootle/apps/pootle_language/views.py\n+++ b/pootle/apps/pootle_language/views.py\n@@ -23,7 +23,7 @@\n from pootle.core.views.formtable import Formtable\n from pootle.core.views.mixins import PootleJSONMixin\n from pootle.i18n import formatter\n-from pootle.i18n.gettext import tr_lang, ugettext_lazy as _\n+from pootle.i18n.gettext import tr_lang, ugettext_lazy as _, ungettext_lazy\n from pootle_misc.util import cmp_by_last_activity\n from pootle_store.constants import STATES_MAP\n \n@@ -232,17 +232,25 @@\n form.cleaned_data[\"actions\"] == \"accept\"\n and form.cleaned_data[\"comment\"])\n if reject_and_notify:\n- message = _(\n+ message = ungettext_lazy(\n+ \"Rejected %s suggestion with comment. User will be notified\",\n \"Rejected %s suggestions with comment. Users will be notified\",\n count)\n elif accept_and_notify:\n- message = _(\n+ message = ungettext_lazy(\n+ \"Accepted %s suggestion with comment. User will be notified\",\n \"Accepted %s suggestions with comment. Users will be notified\",\n count)\n elif form.cleaned_data[\"actions\"] == \"reject\":\n- message = _(\"Rejected %s suggestions\", count)\n+ message = ungettext_lazy(\n+ \"Rejected %s suggestion\",\n+ \"Rejected %s suggestions\",\n+ count)\n else:\n- message = _(\"Accepted %s suggestions\", count)\n+ message = ungettext_lazy(\n+ \"Accepted %s suggestion\",\n+ \"Accepted %s suggestions\",\n+ count)\n messages.success(self.request, message)\n \n def get_context_data(self, **kwargs):\n", "issue": "Plural strings not marked as that\nWhile reviewing strings for 2.8 release, those are not in plural forms:\r\n\r\n- http://pootle.locamotion.org/ar/pootle/translate/pootle.po#unit=1826598\r\n- http://pootle.locamotion.org/ar/pootle/translate/pootle.po#unit=1826597\r\n- http://pootle.locamotion.org/ar/pootle/translate/pootle.po#unit=1826594\r\n- http://pootle.locamotion.org/ar/pootle/translate/pootle.po#unit=1826584\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.contrib import messages\nfrom django.http import Http404\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\nfrom django.utils.lru_cache import lru_cache\nfrom django.utils.safestring import mark_safe\n\nfrom pootle.core.browser import make_project_item\nfrom pootle.core.decorators import (\n get_object_or_404, get_path_obj, permission_required)\nfrom pootle.core.views import PootleBrowseView, PootleTranslateView\nfrom pootle.core.views.admin import PootleFormView\nfrom pootle.core.views.decorators import requires_permission, set_permissions\nfrom pootle.core.views.formtable import Formtable\nfrom pootle.core.views.mixins import PootleJSONMixin\nfrom pootle.i18n import formatter\nfrom pootle.i18n.gettext import tr_lang, ugettext_lazy as _\nfrom pootle_misc.util import cmp_by_last_activity\nfrom pootle_store.constants import STATES_MAP\n\nfrom .apps import PootleLanguageConfig\nfrom .forms import (\n LanguageSpecialCharsForm, LanguageSuggestionAdminForm, LanguageTeamAdminForm,\n LanguageTeamNewMemberSearchForm)\nfrom .models import Language\n\n\nclass LanguageMixin(object):\n ns = \"pootle.language\"\n sw_version = PootleLanguageConfig.version\n model = Language\n browse_url_path = \"pootle-language-browse\"\n translate_url_path = \"pootle-language-translate\"\n template_extends = 'languages/base.html'\n\n @property\n def language(self):\n return self.object\n\n @property\n def permission_context(self):\n return self.get_object().directory\n\n @property\n def url_kwargs(self):\n return {\"language_code\": self.object.code}\n\n @lru_cache()\n def get_object(self):\n lang = Language.get_canonical(self.kwargs[\"language_code\"])\n if lang is None:\n raise Http404\n return lang\n\n def get(self, *args, **kwargs):\n self.object = self.get_object()\n if self.object.code != kwargs[\"language_code\"]:\n return redirect(\n self.url_pattern_name,\n self.object.code,\n permanent=True)\n return super(LanguageMixin, self).get(*args, **kwargs)\n\n\nclass LanguageBrowseView(LanguageMixin, PootleBrowseView):\n url_pattern_name = \"pootle-language-browse\"\n view_name = \"language\"\n\n @cached_property\n def object_children(self):\n items = [make_project_item(tp)\n for tp in self.object.get_children_for_user(self.request.user)]\n items = self.add_child_stats(items)\n items.sort(cmp_by_last_activity)\n return items\n\n @property\n def language(self):\n return {\n 'code': self.object.code,\n 'name': tr_lang(self.object.fullname)}\n\n def get(self, *args, **kwargs):\n response = super(LanguageBrowseView, self).get(*args, **kwargs)\n response.set_cookie('pootle-language', self.object.code)\n return response\n\n\nclass LanguageTranslateView(LanguageMixin, PootleTranslateView):\n url_pattern_name = \"pootle-language-translate\"\n\n\n@get_path_obj\n@permission_required('administrate')\ndef language_characters_admin(request, language):\n form = LanguageSpecialCharsForm(request.POST\n if request.method == 'POST'\n else None,\n instance=language)\n if form.is_valid():\n form.save()\n return redirect('pootle-language-browse', language.code)\n\n ctx = {\n 'page': 'admin-characters',\n\n 'browse_url': reverse('pootle-language-browse', kwargs={\n 'language_code': language.code,\n }),\n 'translate_url': reverse('pootle-language-translate', kwargs={\n 'language_code': language.code,\n }),\n\n 'language': language,\n 
'directory': language.directory,\n 'form': form,\n }\n\n return render(request, 'languages/admin/characters.html', ctx)\n\n\nclass SuggestionFormtable(Formtable):\n row_field = \"suggestions\"\n filters_template = \"languages/admin/includes/suggestions_header.html\"\n\n @property\n def messages(self):\n return self.kwargs.get(\"messages\", [])\n\n\nclass SuggestionDisplay(object):\n\n def __init__(self, suggestion):\n self.__suggestion__ = suggestion\n\n @property\n def unit(self):\n return self.__suggestion__.unit.source_f\n\n @property\n def project(self):\n tp = self.__suggestion__.unit.store.translation_project\n return mark_safe(\n \"<a href='%s'>%s</a>\"\n % (tp.get_absolute_url(),\n tp.project.code))\n\n @property\n def unit_state(self):\n return STATES_MAP[self.__suggestion__.unit.state]\n\n @property\n def unit_link(self):\n return mark_safe(\n \"<a href='%s'>#%s</a>\"\n % (self.__suggestion__.unit.get_translate_url(),\n self.__suggestion__.unit.id))\n\n def __getattr__(self, k):\n try:\n return getattr(self.__suggestion__, k)\n except AttributeError:\n return self.__getattribute__(k)\n\n\nclass PootleLanguageAdminFormView(PootleFormView):\n\n @property\n def permission_context(self):\n return self.language.directory\n\n @set_permissions\n @requires_permission(\"administrate\")\n def dispatch(self, request, *args, **kwargs):\n # get funky with the request 8/\n return super(\n PootleLanguageAdminFormView, self).dispatch(request, *args, **kwargs)\n\n @cached_property\n def language(self):\n return get_object_or_404(\n Language.objects.select_related(\"directory\"),\n code=self.kwargs[\"language_code\"])\n\n def get_form_kwargs(self):\n kwargs = super(PootleLanguageAdminFormView, self).get_form_kwargs()\n kwargs[\"language\"] = self.language\n return kwargs\n\n @property\n def success_kwargs(self):\n return dict(language_code=self.language.code)\n\n\nclass LanguageSuggestionAdminView(PootleLanguageAdminFormView):\n template_name = 'languages/admin/language_team_suggestions.html'\n form_class = LanguageSuggestionAdminForm\n success_url_pattern = \"pootle-language-admin-suggestions\"\n formtable_columns = (\n _(\"Unit\"),\n _(\"State\"),\n _(\"Source\"),\n _(\"Suggestion\"),\n _(\"Suggested by\"),\n _(\"Suggested at\"),\n _(\"Project\"))\n\n @property\n def default_form_kwargs(self):\n return dict(\n page_no=1,\n results_per_page=10)\n\n def add_success_message(self, form):\n count = (\n form.fields[\"suggestions\"].queryset.count()\n if form.cleaned_data[\"select_all\"]\n else len(form.cleaned_data[\"suggestions\"]))\n reject_and_notify = (\n form.cleaned_data[\"actions\"] == \"reject\"\n and form.cleaned_data[\"comment\"])\n accept_and_notify = (\n form.cleaned_data[\"actions\"] == \"accept\"\n and form.cleaned_data[\"comment\"])\n if reject_and_notify:\n message = _(\n \"Rejected %s suggestions with comment. Users will be notified\",\n count)\n elif accept_and_notify:\n message = _(\n \"Accepted %s suggestions with comment. 
Users will be notified\",\n count)\n elif form.cleaned_data[\"actions\"] == \"reject\":\n message = _(\"Rejected %s suggestions\", count)\n else:\n message = _(\"Accepted %s suggestions\", count)\n messages.success(self.request, message)\n\n def get_context_data(self, **kwargs):\n context = super(\n LanguageSuggestionAdminView, self).get_context_data(**kwargs)\n context[\"page\"] = \"admin-suggestions\"\n context[\"language\"] = self.language\n form = context[\"form\"]\n form.is_valid()\n batch = form.batch()\n form.fields[\"suggestions\"].choices = [\n (item.id, SuggestionDisplay(item))\n for item in\n batch.object_list]\n context[\"formtable\"] = SuggestionFormtable(\n form,\n columns=self.formtable_columns,\n page=batch,\n messages=messages.get_messages(self.request))\n return context\n\n def get_form_kwargs(self):\n kwargs = super(LanguageSuggestionAdminView, self).get_form_kwargs()\n if not self.request.POST:\n kwargs[\"data\"] = self.default_form_kwargs\n kwargs[\"user\"] = self.request.user\n return kwargs\n\n\nclass LanguageTeamAdminFormView(PootleLanguageAdminFormView):\n form_class = LanguageTeamAdminForm\n template_name = \"languages/admin/language_team.html\"\n success_url_pattern = \"pootle-language-admin-team\"\n\n def get_context_data(self, **kwargs):\n context = super(LanguageTeamAdminFormView, self).get_context_data(**kwargs)\n form = context[\"form\"]\n context[\"tps\"] = self.language.translationproject_set.exclude(\n project__disabled=True)\n stats = self.language.data_tool.get_stats(\n include_children=False,\n user=self.request.user)\n keys = (\"total\", \"critical\", \"incomplete\", \"translated\", \"fuzzy\",\n \"untranslated\")\n for k in keys:\n if k in stats:\n stats[k + \"_display\"] = formatter.number(stats[k])\n context[\"stats\"] = stats\n context[\"suggestions\"] = form.language_team.suggestions\n suggestions_count = 0\n if context[\"suggestions\"]:\n suggestions_count = context[\"suggestions\"].count()\n context[\"suggestions_display\"] = formatter.number(suggestions_count)\n context[\"language\"] = self.language\n context[\"page\"] = \"admin-team\"\n context[\"browse_url\"] = reverse(\n \"pootle-language-browse\",\n kwargs=dict(language_code=self.language.code))\n context[\"translate_url\"] = reverse(\n \"pootle-language-translate\",\n kwargs=dict(language_code=self.language.code))\n return context\n\n\nclass LanguageTeamAdminNewMembersJSON(PootleJSONMixin, PootleLanguageAdminFormView):\n form_class = LanguageTeamNewMemberSearchForm\n\n def get_context_data(self, **kwargs):\n context = super(\n LanguageTeamAdminNewMembersJSON, self).get_context_data(**kwargs)\n form = context[\"form\"]\n return (\n dict(items=form.search())\n if form.is_valid()\n else dict(items=[]))\n\n def get_form_kwargs(self):\n kwargs = super(LanguageTeamAdminNewMembersJSON, self).get_form_kwargs()\n kwargs[\"data\"] = self.request.GET\n return kwargs\n", "path": "pootle/apps/pootle_language/views.py"}]}
num_tokens_prompt: 3,910
num_tokens_diff: 420
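The views.py diff above swaps `ugettext_lazy` for `ungettext_lazy` so each message carries separate singular and plural msgids. A small standalone illustration of that API follows; it uses the pre-Django-2 names the Pootle codebase relies on (later Django versions rename them to `ngettext`/`ngettext_lazy`), and the message text is invented.

```python
from django.utils.translation import ungettext


def rejected_message(count):
    # ungettext picks the singular or plural msgid based on `count`;
    # gettext then lets each language supply as many plural forms as it needs.
    return ungettext(
        "Rejected %(count)d suggestion",
        "Rejected %(count)d suggestions",
        count,
    ) % {"count": count}
```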
problem_id: gh_patches_debug_34147
source: rasdani/github-patches
task_type: git_diff
in_source_id: kivy__python-for-android-1410
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Comprehensive list of broken recipes When working on https://github.com/kivy/python-for-android/pull/1401 I realised we still have some broken recipes in the tree at least for python3crystax. Even though we don't want to have red builds for things that were already broken, we still want to have a clear status of what's broken and what's not. Basically the idea is to try to compile every single recipes and add the broken ones in the ignore list (`BROKEN_RECIPES`) from #1401. That way we can track and fix them later on meanwhile keeping a green build. I would like to address it in this task. Basically the output of the task should be a PR making the `BROKEN_RECIPES` list comprehensive. With bonus points for creating an issue per broken recipes :smile: </issue> <code> [start of ci/constants.py] 1 from enum import Enum 2 3 4 class TargetPython(Enum): 5 python2 = 0 6 python3crystax = 1 7 8 9 # recipes that currently break the build 10 # a recipe could be broken for a target Python and not for the other, 11 # hence we're maintaining one list per Python target 12 BROKEN_RECIPES_PYTHON2 = set([]) 13 BROKEN_RECIPES_PYTHON3_CRYSTAX = set([ 14 # not yet python3crystax compatible 15 'apsw', 'atom', 'boost', 'brokenrecipe', 'cdecimal', 'cherrypy', 16 'coverage', 'dateutil', 'enaml', 'ethash', 'kiwisolver', 'libgeos', 17 'libnacl', 'libsodium', 'libtorrent', 'libtribler', 'libzbar', 'libzmq', 18 'm2crypto', 'mysqldb', 'ndghttpsclient', 'pil', 'pycrypto', 'pyethereum', 19 'pygame', 'pyleveldb', 'pyproj', 'pyzmq', 'regex', 'shapely', 20 'simple-crypt', 'twsisted', 'vispy', 'websocket-client', 'zbar', 21 'zeroconf', 'zope', 22 # https://github.com/kivy/python-for-android/issues/550 23 'audiostream', 24 # enum34 is not compatible with Python 3.6 standard library 25 # https://stackoverflow.com/a/45716067/185510 26 'enum34', 27 # https://github.com/kivy/python-for-android/issues/1398 28 'ifaddrs', 29 # https://github.com/kivy/python-for-android/issues/1399 30 'libglob', 31 # cannot find -lcrystax 32 'cffi', 'pycryptodome', 'pymuk', 'secp256k1', 33 # https://github.com/kivy/python-for-android/issues/1404 34 'cryptography', 35 # https://github.com/kivy/python-for-android/issues/1294 36 'ffmpeg', 'ffpyplayer', 37 # https://github.com/kivy/python-for-android/pull/1307 ? 38 'gevent', 39 'icu', 40 # https://github.com/kivy/python-for-android/issues/1354 41 'kivent_core', 'kivent_cymunk', 'kivent_particles', 'kivent_polygen', 42 'kivy', 43 # https://github.com/kivy/python-for-android/issues/1405 44 'libpq', 'psycopg2', 45 'netifaces', 46 # https://github.com/kivy/python-for-android/issues/1315 ? 47 'opencv', 48 'protobuf_cpp', 49 # most likely some setup in the Docker container, because it works in host 50 'pyjnius', 'pyopenal', 51 # SyntaxError: invalid syntax (Python2) 52 'storm', 53 'vlc', 54 ]) 55 BROKEN_RECIPES = { 56 TargetPython.python2: BROKEN_RECIPES_PYTHON2, 57 TargetPython.python3crystax: BROKEN_RECIPES_PYTHON3_CRYSTAX, 58 } 59 # recipes that were already built will be skipped 60 CORE_RECIPES = set([ 61 'pyjnius', 'kivy', 'openssl', 'requests', 'sqlite3', 'setuptools', 62 'numpy', 'android', 'python2', 63 ]) 64 [end of ci/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ci/constants.py b/ci/constants.py --- a/ci/constants.py +++ b/ci/constants.py @@ -9,7 +9,57 @@ # recipes that currently break the build # a recipe could be broken for a target Python and not for the other, # hence we're maintaining one list per Python target -BROKEN_RECIPES_PYTHON2 = set([]) +BROKEN_RECIPES_PYTHON2 = set([ + # pythonhelpers.h:12:18: fatal error: string: No such file or directory + 'atom', + # https://github.com/kivy/python-for-android/issues/550 + 'audiostream', + 'brokenrecipe', + # https://github.com/kivy/python-for-android/issues/1409 + 'enaml', + 'evdev', + # distutils.errors.DistutilsError + # Could not find suitable distribution for Requirement.parse('cython') + 'ffpyplayer', + 'flask', + 'groestlcoin_hash', + 'hostpython3crystax', + # https://github.com/kivy/python-for-android/issues/1398 + 'ifaddrs', + # https://github.com/kivy/python-for-android/issues/1354 + 'kivent_core', 'kivent_cymunk', 'kivent_particles', 'kivent_polygen', + 'kiwisolver', + # system dependencies autoconf, libtool + 'libexpat', + 'libgeos', + # https://github.com/kivy/python-for-android/issues/1399 + 'libglob', + # system dependencies cmake and compile error + 'libmysqlclient', + 'libsecp256k1', + 'libtribler', + # system dependencies gettext, pkg-config + 'libzbar', + 'ndghttpsclient', + 'm2crypto', + 'netifaces', + 'Pillow', + # https://github.com/kivy/python-for-android/issues/1405 + 'psycopg2', + 'pygame', + # most likely some setup in the Docker container, because it works in host + 'pyjnius', 'pyopenal', + 'pyproj', + 'pysdl2', + 'pyzmq', + 'secp256k1', + 'shapely', + 'twisted', + 'vlc', + 'websocket-client', + 'zeroconf', + 'zope', +]) BROKEN_RECIPES_PYTHON3_CRYSTAX = set([ # not yet python3crystax compatible 'apsw', 'atom', 'boost', 'brokenrecipe', 'cdecimal', 'cherrypy', @@ -39,7 +89,8 @@ 'icu', # https://github.com/kivy/python-for-android/issues/1354 'kivent_core', 'kivent_cymunk', 'kivent_particles', 'kivent_polygen', - 'kivy', + # system dependencies autoconf, libtool + 'libexpat', # https://github.com/kivy/python-for-android/issues/1405 'libpq', 'psycopg2', 'netifaces',
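The golden diff above only extends the data in `ci/constants.py`. As a hedged illustration of how such a mapping is usually consumed — the helper below is hypothetical and not part of the kivy CI code, and it assumes `ci` is an importable package — a build script could subtract the broken and core sets from the candidate recipes before compiling:

```python
# Hypothetical consumer of ci/constants.py (not in the patch): skip recipes that
# are known-broken for the chosen target Python, plus the already-built core set.
from ci.constants import BROKEN_RECIPES, CORE_RECIPES, TargetPython


def recipes_to_build(all_recipes, target=TargetPython.python3crystax):
    """Return the sorted subset of recipes that are worth compiling in CI."""
    skipped = BROKEN_RECIPES[target] | CORE_RECIPES
    return sorted(set(all_recipes) - skipped)
```

With the patched lists, `recipes_to_build(['kivy', 'pyjnius', 'libglob', 'pillow'])` would keep only `'pillow'`: `'kivy'` and `'pyjnius'` are core recipes and `'libglob'` is still listed as broken for python3crystax.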
{"golden_diff": "diff --git a/ci/constants.py b/ci/constants.py\n--- a/ci/constants.py\n+++ b/ci/constants.py\n@@ -9,7 +9,57 @@\n # recipes that currently break the build\n # a recipe could be broken for a target Python and not for the other,\n # hence we're maintaining one list per Python target\n-BROKEN_RECIPES_PYTHON2 = set([])\n+BROKEN_RECIPES_PYTHON2 = set([\n+ # pythonhelpers.h:12:18: fatal error: string: No such file or directory\n+ 'atom',\n+ # https://github.com/kivy/python-for-android/issues/550\n+ 'audiostream',\n+ 'brokenrecipe',\n+ # https://github.com/kivy/python-for-android/issues/1409\n+ 'enaml',\n+ 'evdev',\n+ # distutils.errors.DistutilsError\n+ # Could not find suitable distribution for Requirement.parse('cython')\n+ 'ffpyplayer',\n+ 'flask',\n+ 'groestlcoin_hash',\n+ 'hostpython3crystax',\n+ # https://github.com/kivy/python-for-android/issues/1398\n+ 'ifaddrs',\n+ # https://github.com/kivy/python-for-android/issues/1354\n+ 'kivent_core', 'kivent_cymunk', 'kivent_particles', 'kivent_polygen',\n+ 'kiwisolver',\n+ # system dependencies autoconf, libtool\n+ 'libexpat',\n+ 'libgeos',\n+ # https://github.com/kivy/python-for-android/issues/1399\n+ 'libglob',\n+ # system dependencies cmake and compile error\n+ 'libmysqlclient',\n+ 'libsecp256k1',\n+ 'libtribler',\n+ # system dependencies gettext, pkg-config\n+ 'libzbar',\n+ 'ndghttpsclient',\n+ 'm2crypto',\n+ 'netifaces',\n+ 'Pillow',\n+ # https://github.com/kivy/python-for-android/issues/1405\n+ 'psycopg2',\n+ 'pygame',\n+ # most likely some setup in the Docker container, because it works in host\n+ 'pyjnius', 'pyopenal',\n+ 'pyproj',\n+ 'pysdl2',\n+ 'pyzmq',\n+ 'secp256k1',\n+ 'shapely',\n+ 'twisted',\n+ 'vlc',\n+ 'websocket-client',\n+ 'zeroconf',\n+ 'zope',\n+])\n BROKEN_RECIPES_PYTHON3_CRYSTAX = set([\n # not yet python3crystax compatible\n 'apsw', 'atom', 'boost', 'brokenrecipe', 'cdecimal', 'cherrypy',\n@@ -39,7 +89,8 @@\n 'icu',\n # https://github.com/kivy/python-for-android/issues/1354\n 'kivent_core', 'kivent_cymunk', 'kivent_particles', 'kivent_polygen',\n- 'kivy',\n+ # system dependencies autoconf, libtool\n+ 'libexpat',\n # https://github.com/kivy/python-for-android/issues/1405\n 'libpq', 'psycopg2',\n 'netifaces',\n", "issue": "Comprehensive list of broken recipes\nWhen working on https://github.com/kivy/python-for-android/pull/1401 I realised we still have some broken recipes in the tree at least for python3crystax.\r\nEven though we don't want to have red builds for things that were already broken, we still want to have a clear status of what's broken and what's not.\r\nBasically the idea is to try to compile every single recipes and add the broken ones in the ignore list (`BROKEN_RECIPES`) from #1401. That way we can track and fix them later on meanwhile keeping a green build.\r\nI would like to address it in this task. Basically the output of the task should be a PR making the `BROKEN_RECIPES` list comprehensive. 
With bonus points for creating an issue per broken recipes :smile: \r\n\n", "before_files": [{"content": "from enum import Enum\n\n\nclass TargetPython(Enum):\n python2 = 0\n python3crystax = 1\n\n\n# recipes that currently break the build\n# a recipe could be broken for a target Python and not for the other,\n# hence we're maintaining one list per Python target\nBROKEN_RECIPES_PYTHON2 = set([])\nBROKEN_RECIPES_PYTHON3_CRYSTAX = set([\n # not yet python3crystax compatible\n 'apsw', 'atom', 'boost', 'brokenrecipe', 'cdecimal', 'cherrypy',\n 'coverage', 'dateutil', 'enaml', 'ethash', 'kiwisolver', 'libgeos',\n 'libnacl', 'libsodium', 'libtorrent', 'libtribler', 'libzbar', 'libzmq',\n 'm2crypto', 'mysqldb', 'ndghttpsclient', 'pil', 'pycrypto', 'pyethereum',\n 'pygame', 'pyleveldb', 'pyproj', 'pyzmq', 'regex', 'shapely',\n 'simple-crypt', 'twsisted', 'vispy', 'websocket-client', 'zbar',\n 'zeroconf', 'zope',\n # https://github.com/kivy/python-for-android/issues/550\n 'audiostream',\n # enum34 is not compatible with Python 3.6 standard library\n # https://stackoverflow.com/a/45716067/185510\n 'enum34',\n # https://github.com/kivy/python-for-android/issues/1398\n 'ifaddrs',\n # https://github.com/kivy/python-for-android/issues/1399\n 'libglob',\n # cannot find -lcrystax\n 'cffi', 'pycryptodome', 'pymuk', 'secp256k1',\n # https://github.com/kivy/python-for-android/issues/1404\n 'cryptography',\n # https://github.com/kivy/python-for-android/issues/1294\n 'ffmpeg', 'ffpyplayer',\n # https://github.com/kivy/python-for-android/pull/1307 ?\n 'gevent',\n 'icu',\n # https://github.com/kivy/python-for-android/issues/1354\n 'kivent_core', 'kivent_cymunk', 'kivent_particles', 'kivent_polygen',\n 'kivy',\n # https://github.com/kivy/python-for-android/issues/1405\n 'libpq', 'psycopg2',\n 'netifaces',\n # https://github.com/kivy/python-for-android/issues/1315 ?\n 'opencv',\n 'protobuf_cpp',\n # most likely some setup in the Docker container, because it works in host\n 'pyjnius', 'pyopenal',\n # SyntaxError: invalid syntax (Python2)\n 'storm',\n 'vlc',\n])\nBROKEN_RECIPES = {\n TargetPython.python2: BROKEN_RECIPES_PYTHON2,\n TargetPython.python3crystax: BROKEN_RECIPES_PYTHON3_CRYSTAX,\n}\n# recipes that were already built will be skipped\nCORE_RECIPES = set([\n 'pyjnius', 'kivy', 'openssl', 'requests', 'sqlite3', 'setuptools',\n 'numpy', 'android', 'python2',\n])\n", "path": "ci/constants.py"}]}
1,586
752
gh_patches_debug_28184
rasdani/github-patches
git_diff
pulp__pulpcore-5088
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Enable file-less "uploads" **Is your feature request related to a problem? Please describe.** Right now pulpcore knows artifactless types, that can be created via file upload using the `NoArtifactContentUploadViewSet` and the `NoArtifactContentUploadSerializer`, which can be combined with "retrieve behaviour" (do not throw errors if the requested content already esists/is already in the repo it should be added to) by defining a plugin specific `retrieve` function on the serializer. However, pulp_deb has several artifact less types, that do not need an actual uploaded file as part of this process at all. All they need (for pulp_deb to be able to create them) is the set of required API parameters. Examples include the `ReleaseComponent` and `ReleaseArchitecture`. These content types should still use the `repository` parameter to create and add them to a repository in one action, along with "retrieve behaviour". Since this means creating new repository versions, this action must be performed as a task to ensure resource locks. As far as I can tell this is currently not possible, because pulpcore does not have the right kind of `ViewSet`. I was able to get things to work with the following adjustments to the `NoArtifactContentUploadViewSet`: https://github.com/pulp/pulpcore/pull/5084 An alternative might be to split up `NoArtifactContentUploadViewSet` into `NoArtifactContentUploadViewSet` and `NoArtifactContentViewSet`, which would mirror the class structure on the serializer side, and possibly make the semantic intention more clear. **Additional context** - See here for the pulp_deb change that prompted this need: https://github.com/pulp/pulp_deb/pull/1018 - I am happy to implement this as soon as there is a consensus on whether to add a whole new ViewSet in the class hierarchy, or whether it is enough to adjust `NoArtifactContentUploadViewSet` to support both use cases. - I could use help in designing a good test to cover this special use case. </issue> <code> [start of pulpcore/plugin/viewsets/content.py] 1 from drf_spectacular.utils import extend_schema 2 3 from django.db import DatabaseError 4 from django.db.utils import IntegrityError 5 6 from pulpcore.app import tasks 7 from pulpcore.plugin.serializers import ( 8 ArtifactSerializer, 9 AsyncOperationResponseSerializer, 10 ) 11 from pulpcore.plugin.models import Artifact, PulpTemporaryFile 12 from pulpcore.plugin.tasking import dispatch 13 from pulpcore.plugin.viewsets import ( 14 ContentViewSet, 15 OperationPostponedResponse, 16 ) 17 18 19 class DefaultDeferredContextMixin: 20 """A mixin that provides a method for retrieving the default deferred context.""" 21 22 def get_deferred_context(self, request): 23 """ 24 Supply context for deferred validation. 25 26 When overwriting this method, it must return a dict, that is JSON serializable by 27 and does _not_ contain 'request' as a key. 
28 """ 29 return {} 30 31 32 class NoArtifactContentUploadViewSet(DefaultDeferredContextMixin, ContentViewSet): 33 """A ViewSet for uploads that do not require to store an uploaded content as an Artifact.""" 34 35 @extend_schema( 36 description="Trigger an asynchronous task to create content," 37 "optionally create new repository version.", 38 responses={202: AsyncOperationResponseSerializer}, 39 ) 40 def create(self, request): 41 """Create a content unit.""" 42 serializer = self.get_serializer(data=request.data) 43 serializer.is_valid(raise_exception=True) 44 45 task_payload = {k: v for k, v in request.data.items()} 46 47 file_content = task_payload.pop("file", None) 48 temp_file = PulpTemporaryFile.init_and_validate(file_content) 49 temp_file.save() 50 51 exclusive_resources = [ 52 item for item in (serializer.validated_data.get(key) for key in ("repository",)) if item 53 ] 54 55 app_label = self.queryset.model._meta.app_label 56 context = self.get_deferred_context(request) 57 context["pulp_temp_file_pk"] = str(temp_file.pk) 58 task = dispatch( 59 tasks.base.general_create, 60 exclusive_resources=exclusive_resources, 61 args=(app_label, serializer.__class__.__name__), 62 kwargs={"data": task_payload, "context": context}, 63 ) 64 return OperationPostponedResponse(task, request) 65 66 67 class SingleArtifactContentUploadViewSet(DefaultDeferredContextMixin, ContentViewSet): 68 """A ViewSet which can be used to store an uploaded content as an Artifact.""" 69 70 @extend_schema( 71 description="Trigger an asynchronous task to create content," 72 "optionally create new repository version.", 73 responses={202: AsyncOperationResponseSerializer}, 74 ) 75 def create(self, request): 76 """Create a content unit.""" 77 serializer = self.get_serializer(data=request.data) 78 serializer.is_valid(raise_exception=True) 79 80 task_payload = self.init_content_data(serializer, request) 81 82 exclusive_resources = [ 83 item 84 for item in (serializer.validated_data.get(key) for key in ("upload", "repository")) 85 if item 86 ] 87 88 app_label = self.queryset.model._meta.app_label 89 task = dispatch( 90 tasks.base.general_create, 91 exclusive_resources=exclusive_resources, 92 args=(app_label, serializer.__class__.__name__), 93 kwargs={ 94 "data": task_payload, 95 "context": self.get_deferred_context(request), 96 }, 97 ) 98 return OperationPostponedResponse(task, request) 99 100 def init_content_data(self, serializer, request): 101 """Initialize the reference to an Artifact along with relevant task's payload data.""" 102 task_payload = {k: v for k, v in request.data.items()} 103 if "file" in task_payload: 104 # in the upload code path make sure, the artifact exists, and the 'file' 105 # parameter is replaced by 'artifact' 106 artifact = Artifact.init_and_validate(task_payload.pop("file")) 107 try: 108 artifact.save() 109 except IntegrityError: 110 # if artifact already exists, let's use it 111 try: 112 artifact = Artifact.objects.get( 113 sha256=artifact.sha256, pulp_domain=request.pulp_domain 114 ) 115 artifact.touch() 116 except (Artifact.DoesNotExist, DatabaseError): 117 # the artifact has since been removed from when we first attempted to save it 118 artifact.save() 119 120 task_payload["artifact"] = ArtifactSerializer( 121 artifact, context={"request": request} 122 ).data["pulp_href"] 123 elif "artifact" in serializer.validated_data: 124 serializer.validated_data["artifact"].touch() 125 # In case of a provided upload object, there is no artifact to touch yet. 
126 127 return task_payload 128 [end of pulpcore/plugin/viewsets/content.py] [start of pulpcore/plugin/viewsets/__init__.py] 1 from pulpcore.filters import BaseFilterSet 2 3 # Allow plugin viewsets to return 202s 4 from pulpcore.app.response import OperationPostponedResponse, TaskGroupOperationResponse 5 6 # Import Viewsets in platform that are potentially useful to plugin writers 7 from pulpcore.app.viewsets import ( 8 AlternateContentSourceViewSet, 9 AsyncUpdateMixin, 10 ContentFilter, 11 ContentGuardFilter, 12 ContentGuardViewSet, 13 ContentViewSet, 14 DistributionFilter, 15 DistributionViewSet, 16 ExporterViewSet, 17 ExportViewSet, 18 ImmutableRepositoryViewSet, 19 ImporterViewSet, 20 ImportViewSet, 21 LabelsMixin, 22 NamedModelViewSet, 23 NAME_FILTER_OPTIONS, 24 NULLABLE_NUMERIC_FILTER_OPTIONS, 25 PublicationFilter, 26 PublicationViewSet, 27 ReadOnlyContentViewSet, 28 ReadOnlyRepositoryViewSet, 29 RemoteFilter, 30 RemoteViewSet, 31 RepositoryVersionViewSet, 32 RepositoryViewSet, 33 RolesMixin, 34 TaskGroupViewSet, 35 TaskViewSet, 36 ) 37 38 from pulpcore.app.viewsets.custom_filters import ( 39 CharInFilter, 40 LabelFilter, 41 RepositoryVersionFilter, 42 ) 43 44 from pulpcore.filters import HyperlinkRelatedFilter 45 46 from .content import ( 47 NoArtifactContentUploadViewSet, 48 SingleArtifactContentUploadViewSet, 49 ) 50 [end of pulpcore/plugin/viewsets/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pulpcore/plugin/viewsets/__init__.py b/pulpcore/plugin/viewsets/__init__.py --- a/pulpcore/plugin/viewsets/__init__.py +++ b/pulpcore/plugin/viewsets/__init__.py @@ -44,6 +44,7 @@ from pulpcore.filters import HyperlinkRelatedFilter from .content import ( + NoArtifactContentViewSet, NoArtifactContentUploadViewSet, SingleArtifactContentUploadViewSet, ) diff --git a/pulpcore/plugin/viewsets/content.py b/pulpcore/plugin/viewsets/content.py --- a/pulpcore/plugin/viewsets/content.py +++ b/pulpcore/plugin/viewsets/content.py @@ -29,6 +29,35 @@ return {} +class NoArtifactContentViewSet(DefaultDeferredContextMixin, ContentViewSet): + """A ViewSet for content creation that does not require a file to be uploaded.""" + + @extend_schema( + description="Trigger an asynchronous task to create content," + "optionally create new repository version.", + responses={202: AsyncOperationResponseSerializer}, + ) + def create(self, request): + """Create a content unit.""" + serializer = self.get_serializer(data=request.data) + serializer.is_valid(raise_exception=True) + + exclusive_resources = [ + item for item in (serializer.validated_data.get(key) for key in ("repository",)) if item + ] + + task = dispatch( + tasks.base.general_create, + exclusive_resources=exclusive_resources, + args=(self.queryset.model._meta.app_label, serializer.__class__.__name__), + kwargs={ + "data": {k: v for k, v in request.data.items()}, + "context": self.get_deferred_context(request), + }, + ) + return OperationPostponedResponse(task, request) + + class NoArtifactContentUploadViewSet(DefaultDeferredContextMixin, ContentViewSet): """A ViewSet for uploads that do not require to store an uploaded content as an Artifact."""
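For context on how the new `NoArtifactContentViewSet` is meant to be consumed, here is a plugin-side sketch. It is not part of the patch; `ReleaseComponent` and its serializer are the pulp_deb-style names mentioned in the issue, and their import paths are assumed:

```python
# Hypothetical plugin viewset (pulp_deb-style): file-less content creation that
# still dispatches general_create as a task, locking the target repository.
from pulpcore.plugin.viewsets import NoArtifactContentViewSet

from pulp_deb.app.models import ReleaseComponent                 # assumed plugin model
from pulp_deb.app.serializers import ReleaseComponentSerializer  # assumed plugin serializer


class ReleaseComponentViewSet(NoArtifactContentViewSet):
    """Create ReleaseComponent units from plain API parameters, no file upload."""

    endpoint_name = 'release_components'
    queryset = ReleaseComponent.objects.all()
    serializer_class = ReleaseComponentSerializer
```

Because the new base class only takes a lock on the optional `repository` and forwards `request.data` to `tasks.base.general_create`, a plugin can pair it with a serializer-side `retrieve()` to get the idempotent "create or fetch existing" behaviour the issue asks for.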
{"golden_diff": "diff --git a/pulpcore/plugin/viewsets/__init__.py b/pulpcore/plugin/viewsets/__init__.py\n--- a/pulpcore/plugin/viewsets/__init__.py\n+++ b/pulpcore/plugin/viewsets/__init__.py\n@@ -44,6 +44,7 @@\n from pulpcore.filters import HyperlinkRelatedFilter\n \n from .content import (\n+ NoArtifactContentViewSet,\n NoArtifactContentUploadViewSet,\n SingleArtifactContentUploadViewSet,\n )\ndiff --git a/pulpcore/plugin/viewsets/content.py b/pulpcore/plugin/viewsets/content.py\n--- a/pulpcore/plugin/viewsets/content.py\n+++ b/pulpcore/plugin/viewsets/content.py\n@@ -29,6 +29,35 @@\n return {}\n \n \n+class NoArtifactContentViewSet(DefaultDeferredContextMixin, ContentViewSet):\n+ \"\"\"A ViewSet for content creation that does not require a file to be uploaded.\"\"\"\n+\n+ @extend_schema(\n+ description=\"Trigger an asynchronous task to create content,\"\n+ \"optionally create new repository version.\",\n+ responses={202: AsyncOperationResponseSerializer},\n+ )\n+ def create(self, request):\n+ \"\"\"Create a content unit.\"\"\"\n+ serializer = self.get_serializer(data=request.data)\n+ serializer.is_valid(raise_exception=True)\n+\n+ exclusive_resources = [\n+ item for item in (serializer.validated_data.get(key) for key in (\"repository\",)) if item\n+ ]\n+\n+ task = dispatch(\n+ tasks.base.general_create,\n+ exclusive_resources=exclusive_resources,\n+ args=(self.queryset.model._meta.app_label, serializer.__class__.__name__),\n+ kwargs={\n+ \"data\": {k: v for k, v in request.data.items()},\n+ \"context\": self.get_deferred_context(request),\n+ },\n+ )\n+ return OperationPostponedResponse(task, request)\n+\n+\n class NoArtifactContentUploadViewSet(DefaultDeferredContextMixin, ContentViewSet):\n \"\"\"A ViewSet for uploads that do not require to store an uploaded content as an Artifact.\"\"\"\n", "issue": "Enable file-less \"uploads\"\n**Is your feature request related to a problem? Please describe.**\r\n\r\nRight now pulpcore knows artifactless types, that can be created via file upload using the `NoArtifactContentUploadViewSet` and the `NoArtifactContentUploadSerializer`, which can be combined with \"retrieve behaviour\" (do not throw errors if the requested content already esists/is already in the repo it should be added to) by defining a plugin specific `retrieve` function on the serializer.\r\n\r\nHowever, pulp_deb has several artifact less types, that do not need an actual uploaded file as part of this process at all. All they need (for pulp_deb to be able to create them) is the set of required API parameters. Examples include the `ReleaseComponent` and `ReleaseArchitecture`. These content types should still use the `repository` parameter to create and add them to a repository in one action, along with \"retrieve behaviour\". Since this means creating new repository versions, this action must be performed as a task to ensure resource locks.\r\n\r\nAs far as I can tell this is currently not possible, because pulpcore does not have the right kind of `ViewSet`. 
I was able to get things to work with the following adjustments to the `NoArtifactContentUploadViewSet`: https://github.com/pulp/pulpcore/pull/5084\r\n\r\nAn alternative might be to split up `NoArtifactContentUploadViewSet` into `NoArtifactContentUploadViewSet` and `NoArtifactContentViewSet`, which would mirror the class structure on the serializer side, and possibly make the semantic intention more clear.\r\n\r\n**Additional context**\r\n\r\n- See here for the pulp_deb change that prompted this need: https://github.com/pulp/pulp_deb/pull/1018\r\n- I am happy to implement this as soon as there is a consensus on whether to add a whole new ViewSet in the class hierarchy, or whether it is enough to adjust `NoArtifactContentUploadViewSet` to support both use cases.\r\n- I could use help in designing a good test to cover this special use case.\r\n\n", "before_files": [{"content": "from drf_spectacular.utils import extend_schema\n\nfrom django.db import DatabaseError\nfrom django.db.utils import IntegrityError\n\nfrom pulpcore.app import tasks\nfrom pulpcore.plugin.serializers import (\n ArtifactSerializer,\n AsyncOperationResponseSerializer,\n)\nfrom pulpcore.plugin.models import Artifact, PulpTemporaryFile\nfrom pulpcore.plugin.tasking import dispatch\nfrom pulpcore.plugin.viewsets import (\n ContentViewSet,\n OperationPostponedResponse,\n)\n\n\nclass DefaultDeferredContextMixin:\n \"\"\"A mixin that provides a method for retrieving the default deferred context.\"\"\"\n\n def get_deferred_context(self, request):\n \"\"\"\n Supply context for deferred validation.\n\n When overwriting this method, it must return a dict, that is JSON serializable by\n and does _not_ contain 'request' as a key.\n \"\"\"\n return {}\n\n\nclass NoArtifactContentUploadViewSet(DefaultDeferredContextMixin, ContentViewSet):\n \"\"\"A ViewSet for uploads that do not require to store an uploaded content as an Artifact.\"\"\"\n\n @extend_schema(\n description=\"Trigger an asynchronous task to create content,\"\n \"optionally create new repository version.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def create(self, request):\n \"\"\"Create a content unit.\"\"\"\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n task_payload = {k: v for k, v in request.data.items()}\n\n file_content = task_payload.pop(\"file\", None)\n temp_file = PulpTemporaryFile.init_and_validate(file_content)\n temp_file.save()\n\n exclusive_resources = [\n item for item in (serializer.validated_data.get(key) for key in (\"repository\",)) if item\n ]\n\n app_label = self.queryset.model._meta.app_label\n context = self.get_deferred_context(request)\n context[\"pulp_temp_file_pk\"] = str(temp_file.pk)\n task = dispatch(\n tasks.base.general_create,\n exclusive_resources=exclusive_resources,\n args=(app_label, serializer.__class__.__name__),\n kwargs={\"data\": task_payload, \"context\": context},\n )\n return OperationPostponedResponse(task, request)\n\n\nclass SingleArtifactContentUploadViewSet(DefaultDeferredContextMixin, ContentViewSet):\n \"\"\"A ViewSet which can be used to store an uploaded content as an Artifact.\"\"\"\n\n @extend_schema(\n description=\"Trigger an asynchronous task to create content,\"\n \"optionally create new repository version.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def create(self, request):\n \"\"\"Create a content unit.\"\"\"\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n 
task_payload = self.init_content_data(serializer, request)\n\n exclusive_resources = [\n item\n for item in (serializer.validated_data.get(key) for key in (\"upload\", \"repository\"))\n if item\n ]\n\n app_label = self.queryset.model._meta.app_label\n task = dispatch(\n tasks.base.general_create,\n exclusive_resources=exclusive_resources,\n args=(app_label, serializer.__class__.__name__),\n kwargs={\n \"data\": task_payload,\n \"context\": self.get_deferred_context(request),\n },\n )\n return OperationPostponedResponse(task, request)\n\n def init_content_data(self, serializer, request):\n \"\"\"Initialize the reference to an Artifact along with relevant task's payload data.\"\"\"\n task_payload = {k: v for k, v in request.data.items()}\n if \"file\" in task_payload:\n # in the upload code path make sure, the artifact exists, and the 'file'\n # parameter is replaced by 'artifact'\n artifact = Artifact.init_and_validate(task_payload.pop(\"file\"))\n try:\n artifact.save()\n except IntegrityError:\n # if artifact already exists, let's use it\n try:\n artifact = Artifact.objects.get(\n sha256=artifact.sha256, pulp_domain=request.pulp_domain\n )\n artifact.touch()\n except (Artifact.DoesNotExist, DatabaseError):\n # the artifact has since been removed from when we first attempted to save it\n artifact.save()\n\n task_payload[\"artifact\"] = ArtifactSerializer(\n artifact, context={\"request\": request}\n ).data[\"pulp_href\"]\n elif \"artifact\" in serializer.validated_data:\n serializer.validated_data[\"artifact\"].touch()\n # In case of a provided upload object, there is no artifact to touch yet.\n\n return task_payload\n", "path": "pulpcore/plugin/viewsets/content.py"}, {"content": "from pulpcore.filters import BaseFilterSet\n\n# Allow plugin viewsets to return 202s\nfrom pulpcore.app.response import OperationPostponedResponse, TaskGroupOperationResponse\n\n# Import Viewsets in platform that are potentially useful to plugin writers\nfrom pulpcore.app.viewsets import (\n AlternateContentSourceViewSet,\n AsyncUpdateMixin,\n ContentFilter,\n ContentGuardFilter,\n ContentGuardViewSet,\n ContentViewSet,\n DistributionFilter,\n DistributionViewSet,\n ExporterViewSet,\n ExportViewSet,\n ImmutableRepositoryViewSet,\n ImporterViewSet,\n ImportViewSet,\n LabelsMixin,\n NamedModelViewSet,\n NAME_FILTER_OPTIONS,\n NULLABLE_NUMERIC_FILTER_OPTIONS,\n PublicationFilter,\n PublicationViewSet,\n ReadOnlyContentViewSet,\n ReadOnlyRepositoryViewSet,\n RemoteFilter,\n RemoteViewSet,\n RepositoryVersionViewSet,\n RepositoryViewSet,\n RolesMixin,\n TaskGroupViewSet,\n TaskViewSet,\n)\n\nfrom pulpcore.app.viewsets.custom_filters import (\n CharInFilter,\n LabelFilter,\n RepositoryVersionFilter,\n)\n\nfrom pulpcore.filters import HyperlinkRelatedFilter\n\nfrom .content import (\n NoArtifactContentUploadViewSet,\n SingleArtifactContentUploadViewSet,\n)\n", "path": "pulpcore/plugin/viewsets/__init__.py"}]}
2,572
442
gh_patches_debug_2654
rasdani/github-patches
git_diff
microsoft__nni-5155
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unclear what extras to install: `import nni.retiarii.execution.api` fails due to missing `pytorch_lightning` **Describe the issue**: I want to use `nni.retiarii.execution.api` module. I've installed it as below: ``` Collecting nni>=2.3 Downloading nni-2.9-py3-none-manylinux1_x86_64.whl (56.0 MB) ``` **Environment**: - NNI version: 2.9 - Python version: 3.8 **Log message**: ``` _________________ ERROR collecting test/3rd_party/test_nni.py __________________ ImportError while importing test module '/__w/ai4cl-tianshou/ai4cl-tianshou/test/3rd_party/test_nni.py'. Hint: make sure your test modules/packages have valid Python names. Traceback: /usr/local/lib/python3.8/importlib/__init__.py:127: in import_module return _bootstrap._gcd_import(name[level:], package, level) test/3rd_party/test_nni.py:8: in <module> import nni.retiarii.execution.api /usr/local/lib/python3.8/site-packages/nni/retiarii/__init__.py:4: in <module> from .operation import Operation /usr/local/lib/python3.8/site-packages/nni/retiarii/operation.py:6: in <module> from nni.nas.execution.common.graph_op import * /usr/local/lib/python3.8/site-packages/nni/nas/__init__.py:4: in <module> from .execution import * /usr/local/lib/python3.8/site-packages/nni/nas/execution/__init__.py:4: in <module> from .api import * /usr/local/lib/python3.8/site-packages/nni/nas/execution/api.py:9: in <module> from nni.nas.execution.common import ( /usr/local/lib/python3.8/site-packages/nni/nas/execution/common/__init__.py:4: in <module> from .engine import * /usr/local/lib/python3.8/site-packages/nni/nas/execution/common/engine.py:7: in <module> from .graph import Model, MetricData /usr/local/lib/python3.8/site-packages/nni/nas/execution/common/graph.py:18: in <module> from nni.nas.evaluator import Evaluator /usr/local/lib/python3.8/site-packages/nni/nas/evaluator/__init__.py:9: in <module> shortcut_framework(__name__) /usr/local/lib/python3.8/site-packages/nni/common/framework.py:93: in shortcut_framework shortcut_module(current, '.' + get_default_framework(), current) /usr/local/lib/python3.8/site-packages/nni/common/framework.py:83: in shortcut_module mod = importlib.import_module(target, package) /usr/local/lib/python3.8/importlib/__init__.py:127: in import_module return _bootstrap._gcd_import(name[level:], package, level) /usr/local/lib/python3.8/site-packages/nni/nas/evaluator/pytorch/__init__.py:4: in <module> from .lightning import * /usr/local/lib/python3.8/site-packages/nni/nas/evaluator/pytorch/lightning.py:10: in <module> import pytorch_lightning as pl E ModuleNotFoundError: No module named 'pytorch_lightning' ``` **How to reproduce it?**: ``` pip install nni==2.9 python -c "import nni.retiarii.execution.api" ``` </issue> <code> [start of nni/nas/evaluator/pytorch/__init__.py] 1 # Copyright (c) Microsoft Corporation. 2 # Licensed under the MIT license. 3 4 from .lightning import * 5 [end of nni/nas/evaluator/pytorch/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nni/nas/evaluator/pytorch/__init__.py b/nni/nas/evaluator/pytorch/__init__.py --- a/nni/nas/evaluator/pytorch/__init__.py +++ b/nni/nas/evaluator/pytorch/__init__.py @@ -1,4 +1,11 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. -from .lightning import * +import warnings + +try: + from .lightning import * +except ImportError: + warnings.warn("PyTorch-Lightning must be installed to use PyTorch in NAS. " + "If you are not using PyTorch, please `nni.set_default_framework('none')`") + raise
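The patch keeps the `ImportError` but prepends a hint, so the user-side workaround it points at looks like the sketch below; the only assumption is that the framework has to be switched before any `nni.nas`/`nni.retiarii` import is attempted:

```python
# Workaround sketch for environments without pytorch_lightning, based on the
# warning text added by the patch: pick the "none" framework before NAS imports.
import nni

nni.set_default_framework('none')   # must run before the import below

import nni.retiarii.execution.api   # no longer drags in pytorch_lightning
```

Installing `pytorch-lightning` directly remains the alternative when the PyTorch-based evaluators are actually needed.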
{"golden_diff": "diff --git a/nni/nas/evaluator/pytorch/__init__.py b/nni/nas/evaluator/pytorch/__init__.py\n--- a/nni/nas/evaluator/pytorch/__init__.py\n+++ b/nni/nas/evaluator/pytorch/__init__.py\n@@ -1,4 +1,11 @@\n # Copyright (c) Microsoft Corporation.\n # Licensed under the MIT license.\n \n-from .lightning import *\n+import warnings\n+\n+try:\n+ from .lightning import *\n+except ImportError:\n+ warnings.warn(\"PyTorch-Lightning must be installed to use PyTorch in NAS. \"\n+ \"If you are not using PyTorch, please `nni.set_default_framework('none')`\")\n+ raise\n", "issue": "Unclear what extras to install: `import nni.retiarii.execution.api` fails due to missing `pytorch_lightning`\n**Describe the issue**:\r\nI want to use `nni.retiarii.execution.api` module. I've installed it as below:\r\n```\r\nCollecting nni>=2.3\r\n Downloading nni-2.9-py3-none-manylinux1_x86_64.whl (56.0 MB)\r\n```\r\n\r\n**Environment**:\r\n- NNI version: 2.9\r\n- Python version: 3.8\r\n\r\n**Log message**:\r\n```\r\n_________________ ERROR collecting test/3rd_party/test_nni.py __________________\r\nImportError while importing test module '/__w/ai4cl-tianshou/ai4cl-tianshou/test/3rd_party/test_nni.py'.\r\nHint: make sure your test modules/packages have valid Python names.\r\nTraceback:\r\n/usr/local/lib/python3.8/importlib/__init__.py:127: in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\ntest/3rd_party/test_nni.py:8: in <module>\r\n import nni.retiarii.execution.api\r\n/usr/local/lib/python3.8/site-packages/nni/retiarii/__init__.py:4: in <module>\r\n from .operation import Operation\r\n/usr/local/lib/python3.8/site-packages/nni/retiarii/operation.py:6: in <module>\r\n from nni.nas.execution.common.graph_op import *\r\n/usr/local/lib/python3.8/site-packages/nni/nas/__init__.py:4: in <module>\r\n from .execution import *\r\n/usr/local/lib/python3.8/site-packages/nni/nas/execution/__init__.py:4: in <module>\r\n from .api import *\r\n/usr/local/lib/python3.8/site-packages/nni/nas/execution/api.py:9: in <module>\r\n from nni.nas.execution.common import (\r\n/usr/local/lib/python3.8/site-packages/nni/nas/execution/common/__init__.py:4: in <module>\r\n from .engine import *\r\n/usr/local/lib/python3.8/site-packages/nni/nas/execution/common/engine.py:7: in <module>\r\n from .graph import Model, MetricData\r\n/usr/local/lib/python3.8/site-packages/nni/nas/execution/common/graph.py:18: in <module>\r\n from nni.nas.evaluator import Evaluator\r\n/usr/local/lib/python3.8/site-packages/nni/nas/evaluator/__init__.py:9: in <module>\r\n shortcut_framework(__name__)\r\n/usr/local/lib/python3.8/site-packages/nni/common/framework.py:93: in shortcut_framework\r\n shortcut_module(current, '.' 
+ get_default_framework(), current)\r\n/usr/local/lib/python3.8/site-packages/nni/common/framework.py:83: in shortcut_module\r\n mod = importlib.import_module(target, package)\r\n/usr/local/lib/python3.8/importlib/__init__.py:127: in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n/usr/local/lib/python3.8/site-packages/nni/nas/evaluator/pytorch/__init__.py:4: in <module>\r\n from .lightning import *\r\n/usr/local/lib/python3.8/site-packages/nni/nas/evaluator/pytorch/lightning.py:10: in <module>\r\n import pytorch_lightning as pl\r\nE ModuleNotFoundError: No module named 'pytorch_lightning'\r\n```\r\n\r\n**How to reproduce it?**:\r\n```\r\npip install nni==2.9\r\npython -c \"import nni.retiarii.execution.api\"\r\n```\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\nfrom .lightning import *\n", "path": "nni/nas/evaluator/pytorch/__init__.py"}]}
1,387
172
gh_patches_debug_3315
rasdani/github-patches
git_diff
google__turbinia-1227
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create documentation for the Turbinia API command line tool Write documentation for the new Turbinia API command line tool. </issue> <code> [start of turbinia/api/cli/turbinia_client/core/groups.py] 1 # -*- coding: utf-8 -*- 2 # Copyright 2022 Google Inc. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 """Turbinia API client command-line tool.""" 16 17 import click 18 19 20 @click.group('config') 21 def config_group(): 22 """Get Turbinia configuration.""" 23 24 25 @click.group('status') 26 def status_group(): 27 """Get Turbinia request/task status.""" 28 29 30 @click.group('result') 31 def result_group(): 32 """Get Turbinia task or request results.""" 33 34 35 @click.group('jobs') 36 def jobs_group(): 37 """Get a list of enabled Turbinia jobs.""" 38 39 40 @click.group('submit') 41 def submit_group(): 42 """Submit new requests to the Turbinia API server.""" 43 [end of turbinia/api/cli/turbinia_client/core/groups.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/turbinia/api/cli/turbinia_client/core/groups.py b/turbinia/api/cli/turbinia_client/core/groups.py --- a/turbinia/api/cli/turbinia_client/core/groups.py +++ b/turbinia/api/cli/turbinia_client/core/groups.py @@ -24,12 +24,12 @@ @click.group('status') def status_group(): - """Get Turbinia request/task status.""" + """Get Turbinia request or task status.""" @click.group('result') def result_group(): - """Get Turbinia task or request results.""" + """Get Turbinia request or task results.""" @click.group('jobs')
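Since these command groups are the surface the requested documentation has to describe, a sketch of how they are typically wired into a root click command may help readers of that documentation; the `cli` entry point and the import path are assumptions, because that module is not shown here:

```python
# Hypothetical wiring of the documented command groups into a root click group.
import click

from turbinia_client.core.groups import (  # import path assumed
    config_group, jobs_group, result_group, status_group, submit_group)


@click.group()
def cli():
    """Turbinia API client command-line tool."""


for group in (config_group, status_group, result_group, jobs_group, submit_group):
    cli.add_command(group)


if __name__ == '__main__':
    cli()
```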
{"golden_diff": "diff --git a/turbinia/api/cli/turbinia_client/core/groups.py b/turbinia/api/cli/turbinia_client/core/groups.py\n--- a/turbinia/api/cli/turbinia_client/core/groups.py\n+++ b/turbinia/api/cli/turbinia_client/core/groups.py\n@@ -24,12 +24,12 @@\n \n @click.group('status')\n def status_group():\n- \"\"\"Get Turbinia request/task status.\"\"\"\n+ \"\"\"Get Turbinia request or task status.\"\"\"\n \n \n @click.group('result')\n def result_group():\n- \"\"\"Get Turbinia task or request results.\"\"\"\n+ \"\"\"Get Turbinia request or task results.\"\"\"\n \n \n @click.group('jobs')\n", "issue": "Create documentation for the Turbinia API command line tool\nWrite documentation for the new Turbinia API command line tool.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2022 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Turbinia API client command-line tool.\"\"\"\n\nimport click\n\n\[email protected]('config')\ndef config_group():\n \"\"\"Get Turbinia configuration.\"\"\"\n\n\[email protected]('status')\ndef status_group():\n \"\"\"Get Turbinia request/task status.\"\"\"\n\n\[email protected]('result')\ndef result_group():\n \"\"\"Get Turbinia task or request results.\"\"\"\n\n\[email protected]('jobs')\ndef jobs_group():\n \"\"\"Get a list of enabled Turbinia jobs.\"\"\"\n\n\[email protected]('submit')\ndef submit_group():\n \"\"\"Submit new requests to the Turbinia API server.\"\"\"\n", "path": "turbinia/api/cli/turbinia_client/core/groups.py"}]}
919
158
gh_patches_debug_36221
rasdani/github-patches
git_diff
dynaconf__dynaconf-161
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [bug] Regression on config path-traversal **Describe the bug** Configuration is not read correctly for the parent directories **To Reproduce** Steps to reproduce the behavior: ``` /tmp/test$ find . . ./test2 ./.env /tmp/test$ ipython3 -c "from dynaconf import settings; print(settings.ENV_FOR_DYNACONF)" testavalue1 /tmp/test$ cd test2 /tmp/test/test2$ ipython3 -c "from dynaconf import settings; print(settings.ENV_FOR_DYNACONF)" DEVELOPMENT /tmp/test/test2$ cat ../.env ENV_FOR_DYNACONF=testavalue1 ``` 2. Having the following config files: see above 3. Having the following app code: see above 4. Executing under the following environment conda Python 3.7.3 **Expected behavior** Reading the file from the parent directory iff it does not exist in the current dir. **Debug output** <details> <summary> Debug Output </summary> ```bash export `DEBUG_LEVEL_FOR_DYNACONF=true` reproduce your problem and paste the output here --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) <ipython-input-1-065bd64b7197> in <module> ----> 1 from dynaconf import settings; print(settings.ENV_FOR_DYNACONF) ~/miniconda3/envs/smr/lib/python3.7/site-packages/dynaconf/__init__.py in <module> 1 # coding: utf-8 ----> 2 from dynaconf.base import LazySettings 3 from dynaconf.validator import Validator, ValidationError 4 from dynaconf.contrib import FlaskDynaconf, DjangoDynaconf 5 ~/miniconda3/envs/smr/lib/python3.7/site-packages/dynaconf/base.py in <module> 6 7 from dynaconf import default_settings ----> 8 from dynaconf.loaders import ( 9 default_loader, 10 settings_loader, ~/miniconda3/envs/smr/lib/python3.7/site-packages/dynaconf/loaders/__init__.py in <module> 2 from dynaconf import constants as ct 3 from dynaconf import default_settings ----> 4 from dynaconf.loaders import ( 5 yaml_loader, toml_loader, json_loader, ini_loader, py_loader 6 ) ~/miniconda3/envs/smr/lib/python3.7/site-packages/dynaconf/loaders/yaml_loader.py in <module> 4 from warnings import warn 5 from dynaconf import default_settings ----> 6 from dynaconf.loaders.base import BaseLoader 7 from dynaconf.constants import YAML_EXTENSIONS 8 from dynaconf.utils import object_merge ~/miniconda3/envs/smr/lib/python3.7/site-packages/dynaconf/loaders/base.py in <module> 5 6 ----> 7 logger = raw_logger() 8 9 ~/miniconda3/envs/smr/lib/python3.7/site-packages/dynaconf/utils/__init__.py in raw_logger() 68 return setup_logger( 69 "dynaconf", ---> 70 level=getattr(logging, level) 71 ) 72 except ImportError: # pragma: no cover AttributeError: module 'logging' has no attribute 'true' ``` </details> **Environment (please complete the following information):** - OS: Linux/Ubuntu18.04 - Dynaconf Version 2.0.0 - Frameworks in use: Flask **Additional context** works perfectly in dynaconf < 2.0 </issue> <code> [start of dynaconf/utils/files.py] 1 import os 2 import inspect 3 from dynaconf.utils import raw_logger, deduplicate 4 5 6 def _walk_to_root(path, break_at=None): 7 """ 8 Directories starting from the given directory up to the root or break_at 9 """ 10 if not os.path.exists(path): # pragma: no cover 11 raise IOError('Starting path not found') 12 13 if os.path.isfile(path): # pragma: no cover 14 path = os.path.dirname(path) 15 16 last_dir = None 17 current_dir = os.path.abspath(path) 18 paths = [] 19 while last_dir != current_dir: 20 paths.append(current_dir) 21 
paths.append(os.path.join(current_dir, 'config')) 22 if current_dir == os.path.abspath(break_at): # pragma: no cover 23 break 24 parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir)) 25 last_dir, current_dir = current_dir, parent_dir 26 return paths 27 28 29 SEARCHTREE = None 30 31 32 def find_file(filename='.env', project_root=None, skip_files=None, **kwargs): 33 """Search in increasingly higher folders for the given file 34 Returns path to the file if found, or an empty string otherwise. 35 36 This function will build a `search_tree` based on: 37 38 - Project_root if specified 39 - Invoked script location and its parents until root 40 - Current working directory 41 42 For each path in the `search_tree` it will also look for an 43 aditional `./config` folder. 44 """ 45 logger = raw_logger() 46 search_tree = [] 47 work_dir = os.getcwd() 48 skip_files = skip_files or [] 49 50 if project_root is None: 51 logger.debug('No root_path for %s', filename) 52 else: 53 logger.debug('Got root_path %s for %s', project_root, filename) 54 search_tree.extend(_walk_to_root(project_root, break_at=work_dir)) 55 56 script_dir = os.path.dirname(os.path.abspath(inspect.stack()[-1].filename)) 57 58 # Path to invoked script and recurivelly to root with its ./config dirs 59 search_tree.extend(_walk_to_root(script_dir, break_at=work_dir)) 60 61 # Where Python interpreter was invoked from and its ./config 62 search_tree.extend([work_dir, os.path.join(work_dir, 'config')]) 63 64 search_tree = deduplicate(search_tree) 65 66 global SEARCHTREE 67 SEARCHTREE != search_tree and logger.debug( 68 'Search Tree: %s', search_tree 69 ) 70 SEARCHTREE = search_tree 71 72 logger.debug('Searching for %s', filename) 73 74 for dirname in search_tree: 75 check_path = os.path.join(dirname, filename) 76 if check_path in skip_files: 77 continue 78 if os.path.exists(check_path): 79 logger.debug('Found: %s', os.path.abspath(check_path)) 80 return check_path # First found will return 81 82 # return empty string if not found so it can still be joined in os.path 83 return '' 84 [end of dynaconf/utils/files.py] [start of dynaconf/utils/__init__.py] 1 # coding: utf-8 2 import os 3 import warnings 4 import functools 5 6 7 BANNER = """ 8 ██████╗ ██╗ ██╗███╗ ██╗ █████╗ ██████╗ ██████╗ ███╗ ██╗███████╗ 9 ██╔══██╗╚██╗ ██╔╝████╗ ██║██╔══██╗██╔════╝██╔═══██╗████╗ ██║██╔════╝ 10 ██║ ██║ ╚████╔╝ ██╔██╗ ██║███████║██║ ██║ ██║██╔██╗ ██║█████╗ 11 ██║ ██║ ╚██╔╝ ██║╚██╗██║██╔══██║██║ ██║ ██║██║╚██╗██║██╔══╝ 12 ██████╔╝ ██║ ██║ ╚████║██║ ██║╚██████╗╚██████╔╝██║ ╚████║██║ 13 ╚═════╝ ╚═╝ ╚═╝ ╚═══╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═╝ ╚═══╝╚═╝ 14 """ 15 16 if os.name == 'nt': # pragma: no cover 17 # windows can't handle the above charmap 18 BANNER = "DYNACONF" 19 20 21 def object_merge(old, new, unique=False): 22 """ 23 Recursively merge two data structures. 24 25 :param unique: When set to True existing list items are not set. 
26 """ 27 if isinstance(old, list) and isinstance(new, list): 28 if old == new: 29 return 30 for item in old[::-1]: 31 if unique and item in new: 32 continue 33 new.insert(0, item) 34 if isinstance(old, dict) and isinstance(new, dict): 35 for key, value in old.items(): 36 if key not in new: 37 new[key] = value 38 else: 39 object_merge(value, new[key]) 40 41 42 class DynaconfDict(dict): 43 """A dict representing en empty Dynaconf object 44 useful to run loaders in to a dict for testing""" 45 def __init__(self, *args, **kwargs): 46 self._loaded_files = [] 47 super(DynaconfDict, self).__init__(*args, **kwargs) 48 49 @property 50 def logger(self): 51 return raw_logger() 52 53 def set(self, key, value, *args, **kwargs): 54 self[key] = value 55 56 @staticmethod 57 def get_environ(key, default=None): # pragma: no cover 58 return os.environ.get(key, default) 59 60 def exists(self, key, **kwargs): 61 return self.get(key, missing) is not missing 62 63 64 @functools.lru_cache() 65 def _logger(level): 66 import logging 67 logging.basicConfig( 68 format=( 69 '%(asctime)s,%(msecs)d %(levelname)-8s ' 70 '[%(filename)s:%(lineno)d - %(funcName)s] %(message)s' 71 ), 72 datefmt='%Y-%m-%d:%H:%M:%S', 73 level=getattr(logging, level) 74 ) 75 logger = logging.getLogger("dynaconf") 76 return logger 77 78 79 def raw_logger(level=None): 80 """Get or create inner logger""" 81 level = level or os.environ.get('DEBUG_LEVEL_FOR_DYNACONF', 'ERROR') 82 return _logger(level) 83 84 85 RENAMED_VARS = { 86 # old: new 87 'DYNACONF_NAMESPACE': 'ENV_FOR_DYNACONF', 88 'NAMESPACE_FOR_DYNACONF': 'ENV_FOR_DYNACONF', 89 'DYNACONF_SETTINGS_MODULE': 'SETTINGS_MODULE_FOR_DYNACONF', 90 'DYNACONF_SETTINGS': 'SETTINGS_MODULE_FOR_DYNACONF', 91 'SETTINGS_MODULE': 'SETTINGS_MODULE_FOR_DYNACONF', 92 'PROJECT_ROOT': 'ROOT_PATH_FOR_DYNACONF', 93 'PROJECT_ROOT_FOR_DYNACONF': 'ROOT_PATH_FOR_DYNACONF', 94 'DYNACONF_SILENT_ERRORS': 'SILENT_ERRORS_FOR_DYNACONF', 95 'DYNACONF_ALWAYS_FRESH_VARS': 'FRESH_VARS_FOR_DYNACONF', 96 'BASE_NAMESPACE_FOR_DYNACONF': 'DEFAULT_ENV_FOR_DYNACONF' 97 } 98 99 100 def compat_kwargs(kwargs): 101 """To keep backwards compat change the kwargs to new names""" 102 warn_deprecations(kwargs) 103 for old, new in RENAMED_VARS.items(): 104 if old in kwargs: 105 kwargs[new] = kwargs[old] 106 107 108 class Missing(object): 109 """ 110 Sentinel value object/singleton used to differentiate between ambiguous 111 situations where `None` is a valid value. 112 """ 113 114 def __bool__(self): 115 """Respond to boolean duck-typing.""" 116 return False 117 118 def __eq__(self, other): 119 """Equality check for a singleton.""" 120 121 return isinstance(other, self.__class__) 122 123 # Ensure compatibility with Python 2.x 124 __nonzero__ = __bool__ 125 126 def __repr__(self): 127 """ 128 Unambiguously identify this string-based representation of Missing, 129 used as a singleton. 
130 """ 131 return '<dynaconf.missing>' 132 133 134 missing = Missing() 135 136 137 def deduplicate(list_object): 138 """Rebuild `list_object` removing duplicated and keeping order""" 139 new = [] 140 for item in list_object: 141 if item not in new: 142 new.append(item) 143 return new 144 145 146 def warn_deprecations(data): 147 if data.get('MERGE_ENABLED_FOR_DYNACONF'): 148 warnings.warn( 149 "MERGE_ENABLED_FOR_DYNACONF is deprecated " 150 "instead it is prefered to use the local merge feature " 151 "see: https://dynaconf.readthedocs.io/en/latest/guides/usage.html" 152 "#merging-existing-values", 153 DeprecationWarning 154 ) 155 for old, new in RENAMED_VARS.items(): 156 if old in data: 157 warnings.warn( 158 "You are using %s which is a deprecated settings " 159 "replace it with %s" % (old, new), 160 DeprecationWarning 161 ) 162 [end of dynaconf/utils/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dynaconf/utils/__init__.py b/dynaconf/utils/__init__.py --- a/dynaconf/utils/__init__.py +++ b/dynaconf/utils/__init__.py @@ -70,7 +70,7 @@ '[%(filename)s:%(lineno)d - %(funcName)s] %(message)s' ), datefmt='%Y-%m-%d:%H:%M:%S', - level=getattr(logging, level) + level=getattr(logging, level, 'DEBUG') ) logger = logging.getLogger("dynaconf") return logger diff --git a/dynaconf/utils/files.py b/dynaconf/utils/files.py --- a/dynaconf/utils/files.py +++ b/dynaconf/utils/files.py @@ -3,6 +3,9 @@ from dynaconf.utils import raw_logger, deduplicate +logger = raw_logger() + + def _walk_to_root(path, break_at=None): """ Directories starting from the given directory up to the root or break_at @@ -19,7 +22,8 @@ while last_dir != current_dir: paths.append(current_dir) paths.append(os.path.join(current_dir, 'config')) - if current_dir == os.path.abspath(break_at): # pragma: no cover + if break_at and current_dir == os.path.abspath(break_at): # noqa + logger.debug('Reached the %s directory, breaking.', break_at) break parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir)) last_dir, current_dir = current_dir, parent_dir @@ -42,7 +46,6 @@ For each path in the `search_tree` it will also look for an aditional `./config` folder. """ - logger = raw_logger() search_tree = [] work_dir = os.getcwd() skip_files = skip_files or [] @@ -55,8 +58,8 @@ script_dir = os.path.dirname(os.path.abspath(inspect.stack()[-1].filename)) - # Path to invoked script and recurivelly to root with its ./config dirs - search_tree.extend(_walk_to_root(script_dir, break_at=work_dir)) + # Path to invoked script and recursivelly to root with its ./config dirs + search_tree.extend(_walk_to_root(script_dir)) # Where Python interpreter was invoked from and its ./config search_tree.extend([work_dir, os.path.join(work_dir, 'config')])
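To make the two behavioural fixes in this diff concrete, a small reproduction sketch; the paths and the environment value come from the issue report, and the directory layout is assumed to exist:

```python
# Sketch of the two failure modes the patch addresses (illustrative values).
import os

# 1. DEBUG_LEVEL_FOR_DYNACONF="true" is not a valid logging level name;
#    raw_logger() used to raise AttributeError, and the new
#    getattr(logging, level, 'DEBUG') fallback keeps it working.
os.environ['DEBUG_LEVEL_FOR_DYNACONF'] = 'true'
from dynaconf.utils import raw_logger
logger = raw_logger()            # no longer crashes

# 2. The walk from the invoked script's directory is no longer cut off at the
#    current working directory. Saved in and run from /tmp/test/test2 (the
#    layout from the issue), this finds the .env one level up in /tmp/test.
from dynaconf.utils.files import find_file
print(find_file('.env'))         # expected: '/tmp/test/.env'
```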
{"golden_diff": "diff --git a/dynaconf/utils/__init__.py b/dynaconf/utils/__init__.py\n--- a/dynaconf/utils/__init__.py\n+++ b/dynaconf/utils/__init__.py\n@@ -70,7 +70,7 @@\n '[%(filename)s:%(lineno)d - %(funcName)s] %(message)s'\n ),\n datefmt='%Y-%m-%d:%H:%M:%S',\n- level=getattr(logging, level)\n+ level=getattr(logging, level, 'DEBUG')\n )\n logger = logging.getLogger(\"dynaconf\")\n return logger\ndiff --git a/dynaconf/utils/files.py b/dynaconf/utils/files.py\n--- a/dynaconf/utils/files.py\n+++ b/dynaconf/utils/files.py\n@@ -3,6 +3,9 @@\n from dynaconf.utils import raw_logger, deduplicate\n \n \n+logger = raw_logger()\n+\n+\n def _walk_to_root(path, break_at=None):\n \"\"\"\n Directories starting from the given directory up to the root or break_at\n@@ -19,7 +22,8 @@\n while last_dir != current_dir:\n paths.append(current_dir)\n paths.append(os.path.join(current_dir, 'config'))\n- if current_dir == os.path.abspath(break_at): # pragma: no cover\n+ if break_at and current_dir == os.path.abspath(break_at): # noqa\n+ logger.debug('Reached the %s directory, breaking.', break_at)\n break\n parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))\n last_dir, current_dir = current_dir, parent_dir\n@@ -42,7 +46,6 @@\n For each path in the `search_tree` it will also look for an\n aditional `./config` folder.\n \"\"\"\n- logger = raw_logger()\n search_tree = []\n work_dir = os.getcwd()\n skip_files = skip_files or []\n@@ -55,8 +58,8 @@\n \n script_dir = os.path.dirname(os.path.abspath(inspect.stack()[-1].filename))\n \n- # Path to invoked script and recurivelly to root with its ./config dirs\n- search_tree.extend(_walk_to_root(script_dir, break_at=work_dir))\n+ # Path to invoked script and recursivelly to root with its ./config dirs\n+ search_tree.extend(_walk_to_root(script_dir))\n \n # Where Python interpreter was invoked from and its ./config\n search_tree.extend([work_dir, os.path.join(work_dir, 'config')])\n", "issue": "[bug] Regression on config path-traversal\n**Describe the bug**\r\nConfiguration is not read correctly for the parent directories\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n```\r\n /tmp/test$ find .\r\n.\r\n./test2\r\n./.env\r\n\r\n/tmp/test$ ipython3 -c \"from dynaconf import settings; print(settings.ENV_FOR_DYNACONF)\"\r\ntestavalue1\r\n\r\n/tmp/test$ cd test2\r\n/tmp/test/test2$ ipython3 -c \"from dynaconf import settings; print(settings.ENV_FOR_DYNACONF)\"\r\nDEVELOPMENT\r\n\r\n/tmp/test/test2$ cat ../.env\r\nENV_FOR_DYNACONF=testavalue1\r\n```\r\n\r\n\r\n2. Having the following config files:\r\n\r\nsee above\r\n\r\n3. Having the following app code:\r\n\r\nsee above\r\n\r\n4. 
Executing under the following environment\r\nconda\r\nPython 3.7.3\r\n\r\n\r\n**Expected behavior**\r\n\r\nReading the file from the parent directory iff it does not exist in the current dir.\r\n\r\n**Debug output**\r\n\r\n<details>\r\n<summary> Debug Output </summary>\r\n\r\n```bash\r\n\r\nexport `DEBUG_LEVEL_FOR_DYNACONF=true` reproduce your problem and paste the output here\r\n\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-1-065bd64b7197> in <module>\r\n----> 1 from dynaconf import settings; print(settings.ENV_FOR_DYNACONF)\r\n\r\n~/miniconda3/envs/smr/lib/python3.7/site-packages/dynaconf/__init__.py in <module>\r\n 1 # coding: utf-8\r\n----> 2 from dynaconf.base import LazySettings\r\n 3 from dynaconf.validator import Validator, ValidationError\r\n 4 from dynaconf.contrib import FlaskDynaconf, DjangoDynaconf\r\n 5 \r\n\r\n~/miniconda3/envs/smr/lib/python3.7/site-packages/dynaconf/base.py in <module>\r\n 6 \r\n 7 from dynaconf import default_settings\r\n----> 8 from dynaconf.loaders import (\r\n 9 default_loader,\r\n 10 settings_loader,\r\n\r\n~/miniconda3/envs/smr/lib/python3.7/site-packages/dynaconf/loaders/__init__.py in <module>\r\n 2 from dynaconf import constants as ct\r\n 3 from dynaconf import default_settings\r\n----> 4 from dynaconf.loaders import (\r\n 5 yaml_loader, toml_loader, json_loader, ini_loader, py_loader\r\n 6 )\r\n\r\n~/miniconda3/envs/smr/lib/python3.7/site-packages/dynaconf/loaders/yaml_loader.py in <module>\r\n 4 from warnings import warn\r\n 5 from dynaconf import default_settings\r\n----> 6 from dynaconf.loaders.base import BaseLoader\r\n 7 from dynaconf.constants import YAML_EXTENSIONS\r\n 8 from dynaconf.utils import object_merge\r\n\r\n~/miniconda3/envs/smr/lib/python3.7/site-packages/dynaconf/loaders/base.py in <module>\r\n 5 \r\n 6 \r\n----> 7 logger = raw_logger()\r\n 8 \r\n 9 \r\n\r\n~/miniconda3/envs/smr/lib/python3.7/site-packages/dynaconf/utils/__init__.py in raw_logger()\r\n 68 return setup_logger(\r\n 69 \"dynaconf\",\r\n---> 70 level=getattr(logging, level)\r\n 71 )\r\n 72 except ImportError: # pragma: no cover\r\n\r\nAttributeError: module 'logging' has no attribute 'true'\r\n\r\n```\r\n\r\n</details>\r\n\r\n**Environment (please complete the following information):**\r\n - OS: Linux/Ubuntu18.04\r\n - Dynaconf Version 2.0.0\r\n - Frameworks in use: Flask\r\n\r\n**Additional context**\r\n\r\nworks perfectly in dynaconf < 2.0\n", "before_files": [{"content": "import os\nimport inspect\nfrom dynaconf.utils import raw_logger, deduplicate\n\n\ndef _walk_to_root(path, break_at=None):\n \"\"\"\n Directories starting from the given directory up to the root or break_at\n \"\"\"\n if not os.path.exists(path): # pragma: no cover\n raise IOError('Starting path not found')\n\n if os.path.isfile(path): # pragma: no cover\n path = os.path.dirname(path)\n\n last_dir = None\n current_dir = os.path.abspath(path)\n paths = []\n while last_dir != current_dir:\n paths.append(current_dir)\n paths.append(os.path.join(current_dir, 'config'))\n if current_dir == os.path.abspath(break_at): # pragma: no cover\n break\n parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))\n last_dir, current_dir = current_dir, parent_dir\n return paths\n\n\nSEARCHTREE = None\n\n\ndef find_file(filename='.env', project_root=None, skip_files=None, **kwargs):\n \"\"\"Search in increasingly higher folders for the given file\n Returns path to the file if found, or an empty 
string otherwise.\n\n This function will build a `search_tree` based on:\n\n - Project_root if specified\n - Invoked script location and its parents until root\n - Current working directory\n\n For each path in the `search_tree` it will also look for an\n aditional `./config` folder.\n \"\"\"\n logger = raw_logger()\n search_tree = []\n work_dir = os.getcwd()\n skip_files = skip_files or []\n\n if project_root is None:\n logger.debug('No root_path for %s', filename)\n else:\n logger.debug('Got root_path %s for %s', project_root, filename)\n search_tree.extend(_walk_to_root(project_root, break_at=work_dir))\n\n script_dir = os.path.dirname(os.path.abspath(inspect.stack()[-1].filename))\n\n # Path to invoked script and recurivelly to root with its ./config dirs\n search_tree.extend(_walk_to_root(script_dir, break_at=work_dir))\n\n # Where Python interpreter was invoked from and its ./config\n search_tree.extend([work_dir, os.path.join(work_dir, 'config')])\n\n search_tree = deduplicate(search_tree)\n\n global SEARCHTREE\n SEARCHTREE != search_tree and logger.debug(\n 'Search Tree: %s', search_tree\n )\n SEARCHTREE = search_tree\n\n logger.debug('Searching for %s', filename)\n\n for dirname in search_tree:\n check_path = os.path.join(dirname, filename)\n if check_path in skip_files:\n continue\n if os.path.exists(check_path):\n logger.debug('Found: %s', os.path.abspath(check_path))\n return check_path # First found will return\n\n # return empty string if not found so it can still be joined in os.path\n return ''\n", "path": "dynaconf/utils/files.py"}, {"content": "# coding: utf-8\nimport os\nimport warnings\nimport functools\n\n\nBANNER = \"\"\"\n\u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2557 \u2588\u2588\u2557\u2588\u2588\u2588\u2557 \u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2588\u2557 \u2588\u2588\u2557\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2557\n\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557\u255a\u2588\u2588\u2557 \u2588\u2588\u2554\u255d\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2551\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2554\u2550\u2550\u2550\u2550\u255d\u2588\u2588\u2554\u2550\u2550\u2550\u2588\u2588\u2557\u2588\u2588\u2588\u2588\u2557 \u2588\u2588\u2551\u2588\u2588\u2554\u2550\u2550\u2550\u2550\u255d\n\u2588\u2588\u2551 \u2588\u2588\u2551 \u255a\u2588\u2588\u2588\u2588\u2554\u255d \u2588\u2588\u2554\u2588\u2588\u2557 \u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2554\u2588\u2588\u2557 \u2588\u2588\u2551\u2588\u2588\u2588\u2588\u2588\u2557\n\u2588\u2588\u2551 \u2588\u2588\u2551 \u255a\u2588\u2588\u2554\u255d \u2588\u2588\u2551\u255a\u2588\u2588\u2557\u2588\u2588\u2551\u2588\u2588\u2554\u2550\u2550\u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551 \u2588\u2588\u2551\u2588\u2588\u2551\u255a\u2588\u2588\u2557\u2588\u2588\u2551\u2588\u2588\u2554\u2550\u2550\u255d\n\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255d \u2588\u2588\u2551 \u2588\u2588\u2551 \u255a\u2588\u2588\u2588\u2588\u2551\u2588\u2588\u2551 \u2588\u2588\u2551\u255a\u2588\u2588\u2588\u2588\u2588\u2588\u2557\u255a\u2588\u2588\u2588\u2588\u2588\u2588\u2554\u255d\u2588\u2588\u2551 \u255a\u2588\u2588\u2588\u2588\u2551\u2588\u2588\u2551\n\u255a\u2550\u2550\u2550\u2550\u2550\u255d \u255a\u2550\u255d \u255a\u2550\u255d \u255a\u2550\u2550\u2550\u255d\u255a\u2550\u255d 
\u255a\u2550\u255d \u255a\u2550\u2550\u2550\u2550\u2550\u255d \u255a\u2550\u2550\u2550\u2550\u2550\u255d \u255a\u2550\u255d \u255a\u2550\u2550\u2550\u255d\u255a\u2550\u255d\n\"\"\"\n\nif os.name == 'nt': # pragma: no cover\n # windows can't handle the above charmap\n BANNER = \"DYNACONF\"\n\n\ndef object_merge(old, new, unique=False):\n \"\"\"\n Recursively merge two data structures.\n\n :param unique: When set to True existing list items are not set.\n \"\"\"\n if isinstance(old, list) and isinstance(new, list):\n if old == new:\n return\n for item in old[::-1]:\n if unique and item in new:\n continue\n new.insert(0, item)\n if isinstance(old, dict) and isinstance(new, dict):\n for key, value in old.items():\n if key not in new:\n new[key] = value\n else:\n object_merge(value, new[key])\n\n\nclass DynaconfDict(dict):\n \"\"\"A dict representing en empty Dynaconf object\n useful to run loaders in to a dict for testing\"\"\"\n def __init__(self, *args, **kwargs):\n self._loaded_files = []\n super(DynaconfDict, self).__init__(*args, **kwargs)\n\n @property\n def logger(self):\n return raw_logger()\n\n def set(self, key, value, *args, **kwargs):\n self[key] = value\n\n @staticmethod\n def get_environ(key, default=None): # pragma: no cover\n return os.environ.get(key, default)\n\n def exists(self, key, **kwargs):\n return self.get(key, missing) is not missing\n\n\[email protected]_cache()\ndef _logger(level):\n import logging\n logging.basicConfig(\n format=(\n '%(asctime)s,%(msecs)d %(levelname)-8s '\n '[%(filename)s:%(lineno)d - %(funcName)s] %(message)s'\n ),\n datefmt='%Y-%m-%d:%H:%M:%S',\n level=getattr(logging, level)\n )\n logger = logging.getLogger(\"dynaconf\")\n return logger\n\n\ndef raw_logger(level=None):\n \"\"\"Get or create inner logger\"\"\"\n level = level or os.environ.get('DEBUG_LEVEL_FOR_DYNACONF', 'ERROR')\n return _logger(level)\n\n\nRENAMED_VARS = {\n # old: new\n 'DYNACONF_NAMESPACE': 'ENV_FOR_DYNACONF',\n 'NAMESPACE_FOR_DYNACONF': 'ENV_FOR_DYNACONF',\n 'DYNACONF_SETTINGS_MODULE': 'SETTINGS_MODULE_FOR_DYNACONF',\n 'DYNACONF_SETTINGS': 'SETTINGS_MODULE_FOR_DYNACONF',\n 'SETTINGS_MODULE': 'SETTINGS_MODULE_FOR_DYNACONF',\n 'PROJECT_ROOT': 'ROOT_PATH_FOR_DYNACONF',\n 'PROJECT_ROOT_FOR_DYNACONF': 'ROOT_PATH_FOR_DYNACONF',\n 'DYNACONF_SILENT_ERRORS': 'SILENT_ERRORS_FOR_DYNACONF',\n 'DYNACONF_ALWAYS_FRESH_VARS': 'FRESH_VARS_FOR_DYNACONF',\n 'BASE_NAMESPACE_FOR_DYNACONF': 'DEFAULT_ENV_FOR_DYNACONF'\n}\n\n\ndef compat_kwargs(kwargs):\n \"\"\"To keep backwards compat change the kwargs to new names\"\"\"\n warn_deprecations(kwargs)\n for old, new in RENAMED_VARS.items():\n if old in kwargs:\n kwargs[new] = kwargs[old]\n\n\nclass Missing(object):\n \"\"\"\n Sentinel value object/singleton used to differentiate between ambiguous\n situations where `None` is a valid value.\n \"\"\"\n\n def __bool__(self):\n \"\"\"Respond to boolean duck-typing.\"\"\"\n return False\n\n def __eq__(self, other):\n \"\"\"Equality check for a singleton.\"\"\"\n\n return isinstance(other, self.__class__)\n\n # Ensure compatibility with Python 2.x\n __nonzero__ = __bool__\n\n def __repr__(self):\n \"\"\"\n Unambiguously identify this string-based representation of Missing,\n used as a singleton.\n \"\"\"\n return '<dynaconf.missing>'\n\n\nmissing = Missing()\n\n\ndef deduplicate(list_object):\n \"\"\"Rebuild `list_object` removing duplicated and keeping order\"\"\"\n new = []\n for item in list_object:\n if item not in new:\n new.append(item)\n return new\n\n\ndef warn_deprecations(data):\n if 
data.get('MERGE_ENABLED_FOR_DYNACONF'):\n warnings.warn(\n \"MERGE_ENABLED_FOR_DYNACONF is deprecated \"\n \"instead it is prefered to use the local merge feature \"\n \"see: https://dynaconf.readthedocs.io/en/latest/guides/usage.html\"\n \"#merging-existing-values\",\n DeprecationWarning\n )\n for old, new in RENAMED_VARS.items():\n if old in data:\n warnings.warn(\n \"You are using %s which is a deprecated settings \"\n \"replace it with %s\" % (old, new),\n DeprecationWarning\n )\n", "path": "dynaconf/utils/__init__.py"}]}
3,958
553
gh_patches_debug_27407
rasdani/github-patches
git_diff
wright-group__WrightTools-931
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> consider using NumExpr as alternative to eval Within the units library, we use eval. https://github.com/wright-group/WrightTools/blob/master/WrightTools/units.py#L96-L135 We could use NumExpr... our usage case seems to be exactly what the package was made for. https://numexpr.readthedocs.io/en/latest/user_guide.html </issue> <code> [start of WrightTools/units.py] 1 """Unit and label handling in WrightTools.""" 2 3 4 # --- import -------------------------------------------------------------------------------------- 5 6 7 import collections 8 9 import numpy as np 10 import warnings 11 12 13 # --- define -------------------------------------------------------------------------------------- 14 15 16 # units are stored in dictionaries of like kind. format: 17 # unit : to native, from native, units_symbol, units_label 18 19 # angle units (native: rad) 20 angle = {"rad": ["x", "x", r"rad"], "deg": ["x/57.2958", "57.2958*x", r"deg"]} 21 22 # delay units (native: fs) 23 fs_per_mm = 3336. 24 delay = { 25 "fs": ["x", "x", r"fs"], 26 "ps": ["x*1e3", "x/1e3", r"ps"], 27 "ns": ["x*1e6", "x/1e6", r"ns"], 28 "mm_delay": ["x*2*fs_per_mm", "x/(2*fs_per_mm)", r"mm"], 29 } 30 31 # energy units (native: nm) 32 energy = { 33 "nm": ["x", "x", r"nm"], 34 "wn": ["1e7/x", "1e7/x", r"cm^{-1}"], 35 "eV": ["1240./x", "1240./x", r"eV"], 36 "meV": ["1240000./x", "1240000./x", r"meV"], 37 "Hz": ["2.99792458e17/x", "2.99792458e17/x", r"Hz"], 38 "THz": ["2.99792458e5/x", "2.99792458e5/x", r"THz"], 39 "GHz": ["2.99792458e8/x", "2.99792458e8/x", r"GHz"], 40 } 41 42 # fluence units (native: uJ per sq. cm) 43 fluence = {"uJ per sq. cm": ["x", "x", r"\frac{\mu J}{cm^{2}}"]} 44 45 # optical density units (native: od) 46 od = {"mOD": ["1e3*x", "x/1e3", r"mOD"], "OD": ["x", "x", r"OD"]} 47 48 # position units (native: mm) 49 position = { 50 "nm_p": ["x/1e6", "1e6*x", r"nm"], 51 "um": ["x/1000.", "1000.*x", r"um"], 52 "mm": ["x", "x", r"mm"], 53 "cm": ["10.*x", "x/10.", r"cm"], 54 "in": ["x*0.039370", "0.039370*x", r"in"], 55 } 56 57 # pulse width units (native: FWHM) 58 pulse_width = {"FWHM": ["x", "x", r"FWHM"]} 59 60 # temperature units (native: K) 61 temperature = { 62 "K": ["x", "x", r"K"], 63 "deg_C": ["x+273.15", "x-273.15", r"^\circ C"], 64 "deg_F": ["(x+459.67)*5/9", "x*9/5-456.67", r"^\circ F"], 65 "deg_R": ["x*5/9", "x*9/5", r"^\circ R"], 66 } 67 68 # time units (native: s) 69 time = { 70 "fs_t": ["x/1e15", "x*1e15", r"fs"], 71 "ps_t": ["x/1e12", "x*1e12", r"ps"], 72 "ns_t": ["x/1e9", "x*1e9", r"ns"], 73 "us_t": ["x/1e6", "x*1e6", r"us"], 74 "ms_t": ["x/1000.", "x*1000.", r"ms"], 75 "s_t": ["x", "x", r"s"], 76 "m_t": ["x*60.", "x/60.", r"m"], 77 "h_t": ["x*3600.", "x/3600.", r"h"], 78 "d_t": ["x*86400.", "x/86400.", r"d"], 79 } 80 81 dicts = collections.OrderedDict() 82 dicts["angle"] = angle 83 dicts["delay"] = delay 84 dicts["energy"] = energy 85 dicts["time"] = time 86 dicts["position"] = position 87 dicts["pulse_width"] = pulse_width 88 dicts["fluence"] = fluence 89 dicts["od"] = od 90 dicts["temperature"] = temperature 91 92 93 # --- functions ----------------------------------------------------------------------------------- 94 95 96 def converter(val, current_unit, destination_unit): 97 """Convert from one unit to another. 98 99 Parameters 100 ---------- 101 val : number 102 Number to convert. 103 current_unit : string 104 Current unit. 105 destination_unit : string 106 Destination unit. 
107 108 Returns 109 ------- 110 number 111 Converted value. 112 """ 113 x = val 114 for dic in dicts.values(): 115 if current_unit in dic.keys() and destination_unit in dic.keys(): 116 try: 117 native = eval(dic[current_unit][0]) 118 except ZeroDivisionError: 119 native = np.inf 120 x = native # noqa: F841 121 try: 122 out = eval(dic[destination_unit][1]) 123 except ZeroDivisionError: 124 out = np.inf 125 return out 126 # if all dictionaries fail 127 if current_unit is None and destination_unit is None: 128 pass 129 else: 130 warnings.warn( 131 "conversion {0} to {1} not valid: returning input".format( 132 current_unit, destination_unit 133 ) 134 ) 135 return val 136 137 138 convert = converter 139 140 141 def get_symbol(units) -> str: 142 """Get default symbol type. 143 144 Parameters 145 ---------- 146 units_str : string 147 Units. 148 149 Returns 150 ------- 151 string 152 LaTeX formatted symbol. 153 """ 154 if kind(units) == "energy": 155 d = {} 156 d["nm"] = r"\lambda" 157 d["wn"] = r"\bar\nu" 158 d["eV"] = r"\hslash\omega" 159 d["Hz"] = r"f" 160 d["THz"] = r"f" 161 d["GHz"] = r"f" 162 return d.get(units, "E") 163 elif kind(units) == "delay": 164 return r"\tau" 165 elif kind(units) == "fluence": 166 return r"\mathcal{F}" 167 elif kind(units) == "pulse_width": 168 return r"\sigma" 169 elif kind(units) == "temperature": 170 return r"T" 171 else: 172 return kind(units) 173 174 175 def get_valid_conversions(units) -> tuple: 176 try: 177 valid = list(dicts[kind(units)]) 178 except KeyError: 179 return () 180 valid.remove(units) 181 return tuple(valid) 182 183 184 def is_valid_conversion(a, b) -> bool: 185 for dic in dicts.values(): 186 if a in dic.keys() and b in dic.keys(): 187 return True 188 if a is None and b is None: 189 return True 190 else: 191 return False 192 193 194 def kind(units): 195 """Find the kind of given units. 196 197 Parameters 198 ---------- 199 units : string 200 The units of interest 201 202 Returns 203 ------- 204 string 205 The kind of the given units. If no match is found, returns None. 206 """ 207 for k, v in dicts.items(): 208 if units in v.keys(): 209 return k 210 [end of WrightTools/units.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/WrightTools/units.py b/WrightTools/units.py --- a/WrightTools/units.py +++ b/WrightTools/units.py @@ -5,9 +5,10 @@ import collections +import warnings import numpy as np -import warnings +import numexpr # --- define -------------------------------------------------------------------------------------- @@ -20,7 +21,7 @@ angle = {"rad": ["x", "x", r"rad"], "deg": ["x/57.2958", "57.2958*x", r"deg"]} # delay units (native: fs) -fs_per_mm = 3336. +fs_per_mm = 3336.0 delay = { "fs": ["x", "x", r"fs"], "ps": ["x*1e3", "x/1e3", r"ps"], @@ -114,12 +115,12 @@ for dic in dicts.values(): if current_unit in dic.keys() and destination_unit in dic.keys(): try: - native = eval(dic[current_unit][0]) + native = numexpr.evaluate(dic[current_unit][0], {"x": x}) except ZeroDivisionError: native = np.inf x = native # noqa: F841 try: - out = eval(dic[destination_unit][1]) + out = numexpr.evaluate(dic[destination_unit][1], {"x": x}) except ZeroDivisionError: out = np.inf return out
{"golden_diff": "diff --git a/WrightTools/units.py b/WrightTools/units.py\n--- a/WrightTools/units.py\n+++ b/WrightTools/units.py\n@@ -5,9 +5,10 @@\n \n \n import collections\n+import warnings\n \n import numpy as np\n-import warnings\n+import numexpr\n \n \n # --- define --------------------------------------------------------------------------------------\n@@ -20,7 +21,7 @@\n angle = {\"rad\": [\"x\", \"x\", r\"rad\"], \"deg\": [\"x/57.2958\", \"57.2958*x\", r\"deg\"]}\n \n # delay units (native: fs)\n-fs_per_mm = 3336.\n+fs_per_mm = 3336.0\n delay = {\n \"fs\": [\"x\", \"x\", r\"fs\"],\n \"ps\": [\"x*1e3\", \"x/1e3\", r\"ps\"],\n@@ -114,12 +115,12 @@\n for dic in dicts.values():\n if current_unit in dic.keys() and destination_unit in dic.keys():\n try:\n- native = eval(dic[current_unit][0])\n+ native = numexpr.evaluate(dic[current_unit][0], {\"x\": x})\n except ZeroDivisionError:\n native = np.inf\n x = native # noqa: F841\n try:\n- out = eval(dic[destination_unit][1])\n+ out = numexpr.evaluate(dic[destination_unit][1], {\"x\": x})\n except ZeroDivisionError:\n out = np.inf\n return out\n", "issue": "consider using NumExpr as alternative to eval\nWithin the units library, we use eval.\r\n\r\nhttps://github.com/wright-group/WrightTools/blob/master/WrightTools/units.py#L96-L135\r\n\r\nWe could use NumExpr... our usage case seems to be exactly what the package was made for.\r\n\r\nhttps://numexpr.readthedocs.io/en/latest/user_guide.html\n", "before_files": [{"content": "\"\"\"Unit and label handling in WrightTools.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport collections\n\nimport numpy as np\nimport warnings\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n# units are stored in dictionaries of like kind. format:\n# unit : to native, from native, units_symbol, units_label\n\n# angle units (native: rad)\nangle = {\"rad\": [\"x\", \"x\", r\"rad\"], \"deg\": [\"x/57.2958\", \"57.2958*x\", r\"deg\"]}\n\n# delay units (native: fs)\nfs_per_mm = 3336.\ndelay = {\n \"fs\": [\"x\", \"x\", r\"fs\"],\n \"ps\": [\"x*1e3\", \"x/1e3\", r\"ps\"],\n \"ns\": [\"x*1e6\", \"x/1e6\", r\"ns\"],\n \"mm_delay\": [\"x*2*fs_per_mm\", \"x/(2*fs_per_mm)\", r\"mm\"],\n}\n\n# energy units (native: nm)\nenergy = {\n \"nm\": [\"x\", \"x\", r\"nm\"],\n \"wn\": [\"1e7/x\", \"1e7/x\", r\"cm^{-1}\"],\n \"eV\": [\"1240./x\", \"1240./x\", r\"eV\"],\n \"meV\": [\"1240000./x\", \"1240000./x\", r\"meV\"],\n \"Hz\": [\"2.99792458e17/x\", \"2.99792458e17/x\", r\"Hz\"],\n \"THz\": [\"2.99792458e5/x\", \"2.99792458e5/x\", r\"THz\"],\n \"GHz\": [\"2.99792458e8/x\", \"2.99792458e8/x\", r\"GHz\"],\n}\n\n# fluence units (native: uJ per sq. cm)\nfluence = {\"uJ per sq. 
cm\": [\"x\", \"x\", r\"\\frac{\\mu J}{cm^{2}}\"]}\n\n# optical density units (native: od)\nod = {\"mOD\": [\"1e3*x\", \"x/1e3\", r\"mOD\"], \"OD\": [\"x\", \"x\", r\"OD\"]}\n\n# position units (native: mm)\nposition = {\n \"nm_p\": [\"x/1e6\", \"1e6*x\", r\"nm\"],\n \"um\": [\"x/1000.\", \"1000.*x\", r\"um\"],\n \"mm\": [\"x\", \"x\", r\"mm\"],\n \"cm\": [\"10.*x\", \"x/10.\", r\"cm\"],\n \"in\": [\"x*0.039370\", \"0.039370*x\", r\"in\"],\n}\n\n# pulse width units (native: FWHM)\npulse_width = {\"FWHM\": [\"x\", \"x\", r\"FWHM\"]}\n\n# temperature units (native: K)\ntemperature = {\n \"K\": [\"x\", \"x\", r\"K\"],\n \"deg_C\": [\"x+273.15\", \"x-273.15\", r\"^\\circ C\"],\n \"deg_F\": [\"(x+459.67)*5/9\", \"x*9/5-456.67\", r\"^\\circ F\"],\n \"deg_R\": [\"x*5/9\", \"x*9/5\", r\"^\\circ R\"],\n}\n\n# time units (native: s)\ntime = {\n \"fs_t\": [\"x/1e15\", \"x*1e15\", r\"fs\"],\n \"ps_t\": [\"x/1e12\", \"x*1e12\", r\"ps\"],\n \"ns_t\": [\"x/1e9\", \"x*1e9\", r\"ns\"],\n \"us_t\": [\"x/1e6\", \"x*1e6\", r\"us\"],\n \"ms_t\": [\"x/1000.\", \"x*1000.\", r\"ms\"],\n \"s_t\": [\"x\", \"x\", r\"s\"],\n \"m_t\": [\"x*60.\", \"x/60.\", r\"m\"],\n \"h_t\": [\"x*3600.\", \"x/3600.\", r\"h\"],\n \"d_t\": [\"x*86400.\", \"x/86400.\", r\"d\"],\n}\n\ndicts = collections.OrderedDict()\ndicts[\"angle\"] = angle\ndicts[\"delay\"] = delay\ndicts[\"energy\"] = energy\ndicts[\"time\"] = time\ndicts[\"position\"] = position\ndicts[\"pulse_width\"] = pulse_width\ndicts[\"fluence\"] = fluence\ndicts[\"od\"] = od\ndicts[\"temperature\"] = temperature\n\n\n# --- functions -----------------------------------------------------------------------------------\n\n\ndef converter(val, current_unit, destination_unit):\n \"\"\"Convert from one unit to another.\n\n Parameters\n ----------\n val : number\n Number to convert.\n current_unit : string\n Current unit.\n destination_unit : string\n Destination unit.\n\n Returns\n -------\n number\n Converted value.\n \"\"\"\n x = val\n for dic in dicts.values():\n if current_unit in dic.keys() and destination_unit in dic.keys():\n try:\n native = eval(dic[current_unit][0])\n except ZeroDivisionError:\n native = np.inf\n x = native # noqa: F841\n try:\n out = eval(dic[destination_unit][1])\n except ZeroDivisionError:\n out = np.inf\n return out\n # if all dictionaries fail\n if current_unit is None and destination_unit is None:\n pass\n else:\n warnings.warn(\n \"conversion {0} to {1} not valid: returning input\".format(\n current_unit, destination_unit\n )\n )\n return val\n\n\nconvert = converter\n\n\ndef get_symbol(units) -> str:\n \"\"\"Get default symbol type.\n\n Parameters\n ----------\n units_str : string\n Units.\n\n Returns\n -------\n string\n LaTeX formatted symbol.\n \"\"\"\n if kind(units) == \"energy\":\n d = {}\n d[\"nm\"] = r\"\\lambda\"\n d[\"wn\"] = r\"\\bar\\nu\"\n d[\"eV\"] = r\"\\hslash\\omega\"\n d[\"Hz\"] = r\"f\"\n d[\"THz\"] = r\"f\"\n d[\"GHz\"] = r\"f\"\n return d.get(units, \"E\")\n elif kind(units) == \"delay\":\n return r\"\\tau\"\n elif kind(units) == \"fluence\":\n return r\"\\mathcal{F}\"\n elif kind(units) == \"pulse_width\":\n return r\"\\sigma\"\n elif kind(units) == \"temperature\":\n return r\"T\"\n else:\n return kind(units)\n\n\ndef get_valid_conversions(units) -> tuple:\n try:\n valid = list(dicts[kind(units)])\n except KeyError:\n return ()\n valid.remove(units)\n return tuple(valid)\n\n\ndef is_valid_conversion(a, b) -> bool:\n for dic in dicts.values():\n if a in dic.keys() and b in dic.keys():\n return True\n if a is None and b is 
None:\n return True\n else:\n return False\n\n\ndef kind(units):\n \"\"\"Find the kind of given units.\n\n Parameters\n ----------\n units : string\n The units of interest\n\n Returns\n -------\n string\n The kind of the given units. If no match is found, returns None.\n \"\"\"\n for k, v in dicts.items():\n if units in v.keys():\n return k\n", "path": "WrightTools/units.py"}]}
2,876
346
gh_patches_debug_18101
rasdani/github-patches
git_diff
streamlink__streamlink-5569
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> plugins.pandalive: HTTP status 400 on API call ### Checklist - [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose) - [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink) - [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22) - [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master) ### Streamlink version streamlink 6.2.0 ### Description the livestreams can't resolved on pandalive it was always resolved 400 Client Error ### Debug log ```text C:\Users\Jerry>C:\APP\Streamlink\bin\streamlink.exe https://www.pandalive.co.kr/live/play/pocet00 --loglevel=debug [session][debug] Plugin pandalive is being overridden by C:\Users\Jerry\AppData\Roaming\streamlink\plugins\pandalive.py [cli][debug] OS: Windows 10 [cli][debug] Python: 3.11.5 [cli][debug] OpenSSL: OpenSSL 3.0.9 30 May 2023 [cli][debug] Streamlink: 6.2.0 [cli][debug] Dependencies: [cli][debug] certifi: 2023.7.22 [cli][debug] isodate: 0.6.1 [cli][debug] lxml: 4.9.3 [cli][debug] pycountry: 22.3.5 [cli][debug] pycryptodome: 3.18.0 [cli][debug] PySocks: 1.7.1 [cli][debug] requests: 2.31.0 [cli][debug] trio: 0.22.2 [cli][debug] trio-websocket: 0.10.4 [cli][debug] typing-extensions: 4.7.1 [cli][debug] urllib3: 2.0.4 [cli][debug] websocket-client: 1.6.3 [cli][debug] Arguments: [cli][debug] url=https://www.pandalive.co.kr/live/play/pocet00 [cli][debug] --loglevel=debug [cli][debug] --ffmpeg-ffmpeg=C:\APP\Streamlink\ffmpeg\ffmpeg.exe [cli][info] Found matching plugin pandalive for URL https://www.pandalive.co.kr/live/play/pocet00 [plugins.pandalive][debug] Media code: pocet00 error: Unable to open URL: https://api.pandalive.co.kr/v1/live/play (400 Client Error: Bad Request for url: https://api.pandalive.co.kr/v1/live/play) ``` </issue> <code> [start of src/streamlink/plugins/pandalive.py] 1 """ 2 $description South Korean live-streaming platform for individual live streams. 
3 $url pandalive.co.kr 4 $type live 5 $metadata author 6 $metadata title 7 """ 8 9 import logging 10 import re 11 12 from streamlink.plugin import Plugin, pluginmatcher 13 from streamlink.plugin.api import validate 14 from streamlink.stream.hls import HLSStream 15 16 17 log = logging.getLogger(__name__) 18 19 20 @pluginmatcher(re.compile( 21 r"https?://(?:www\.)?pandalive\.co\.kr/", 22 )) 23 class Pandalive(Plugin): 24 def _get_streams(self): 25 media_code = self.session.http.get(self.url, schema=validate.Schema( 26 re.compile(r"""routePath:\s*(?P<q>["'])(\\u002F|/)live(\\u002F|/)play(\\u002F|/)(?P<id>.+?)(?P=q)"""), 27 validate.any(None, validate.get("id")), 28 )) 29 30 if not media_code: 31 return 32 33 log.debug(f"Media code: {media_code}") 34 35 json = self.session.http.post( 36 "https://api.pandalive.co.kr/v1/live/play", 37 data={ 38 "action": "watch", 39 "userId": media_code, 40 }, 41 schema=validate.Schema( 42 validate.parse_json(), 43 validate.any( 44 { 45 "media": { 46 "title": str, 47 "userId": str, 48 "userNick": str, 49 "isPw": bool, 50 "isLive": bool, 51 "liveType": str, 52 }, 53 "PlayList": { 54 validate.optional("hls"): [{ 55 "url": validate.url(), 56 }], 57 validate.optional("hls2"): [{ 58 "url": validate.url(), 59 }], 60 validate.optional("hls3"): [{ 61 "url": validate.url(), 62 }], 63 }, 64 "result": bool, 65 "message": str, 66 }, 67 { 68 "result": bool, 69 "message": str, 70 }, 71 ), 72 ), 73 ) 74 75 if not json["result"]: 76 log.error(json["message"]) 77 return 78 79 if not json["media"]["isLive"]: 80 log.error("The broadcast has ended") 81 return 82 83 if json["media"]["isPw"]: 84 log.error("The broadcast is password protected") 85 return 86 87 log.info(f"Broadcast type: {json['media']['liveType']}") 88 89 self.author = f"{json['media']['userNick']} ({json['media']['userId']})" 90 self.title = f"{json['media']['title']}" 91 92 playlist = json["PlayList"] 93 for key in ("hls", "hls2", "hls3"): 94 # use the first available HLS stream 95 if key in playlist and playlist[key]: 96 # all stream qualities share the same URL, so just use the first one 97 return HLSStream.parse_variant_playlist(self.session, playlist[key][0]["url"]) 98 99 100 __plugin__ = Pandalive 101 [end of src/streamlink/plugins/pandalive.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/pandalive.py b/src/streamlink/plugins/pandalive.py --- a/src/streamlink/plugins/pandalive.py +++ b/src/streamlink/plugins/pandalive.py @@ -18,7 +18,7 @@ @pluginmatcher(re.compile( - r"https?://(?:www\.)?pandalive\.co\.kr/", + r"https?://(?:www\.)?pandalive\.co\.kr/live/play/[^/]+", )) class Pandalive(Plugin): def _get_streams(self): @@ -34,10 +34,14 @@ json = self.session.http.post( "https://api.pandalive.co.kr/v1/live/play", + headers={ + "Referer": self.url, + }, data={ "action": "watch", "userId": media_code, }, + acceptable_status=(200, 400), schema=validate.Schema( validate.parse_json(), validate.any(
{"golden_diff": "diff --git a/src/streamlink/plugins/pandalive.py b/src/streamlink/plugins/pandalive.py\n--- a/src/streamlink/plugins/pandalive.py\n+++ b/src/streamlink/plugins/pandalive.py\n@@ -18,7 +18,7 @@\n \n \n @pluginmatcher(re.compile(\n- r\"https?://(?:www\\.)?pandalive\\.co\\.kr/\",\n+ r\"https?://(?:www\\.)?pandalive\\.co\\.kr/live/play/[^/]+\",\n ))\n class Pandalive(Plugin):\n def _get_streams(self):\n@@ -34,10 +34,14 @@\n \n json = self.session.http.post(\n \"https://api.pandalive.co.kr/v1/live/play\",\n+ headers={\n+ \"Referer\": self.url,\n+ },\n data={\n \"action\": \"watch\",\n \"userId\": media_code,\n },\n+ acceptable_status=(200, 400),\n schema=validate.Schema(\n validate.parse_json(),\n validate.any(\n", "issue": "plugins.pandalive: HTTP status 400 on API call\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nstreamlink 6.2.0\n\n### Description\n\nthe livestreams can't resolved on pandalive\r\nit was always resolved 400 Client Error \n\n### Debug log\n\n```text\nC:\\Users\\Jerry>C:\\APP\\Streamlink\\bin\\streamlink.exe https://www.pandalive.co.kr/live/play/pocet00 --loglevel=debug\r\n[session][debug] Plugin pandalive is being overridden by C:\\Users\\Jerry\\AppData\\Roaming\\streamlink\\plugins\\pandalive.py\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.11.5\r\n[cli][debug] OpenSSL: OpenSSL 3.0.9 30 May 2023\r\n[cli][debug] Streamlink: 6.2.0\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.7.22\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.3\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.18.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] trio: 0.22.2\r\n[cli][debug] trio-websocket: 0.10.4\r\n[cli][debug] typing-extensions: 4.7.1\r\n[cli][debug] urllib3: 2.0.4\r\n[cli][debug] websocket-client: 1.6.3\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.pandalive.co.kr/live/play/pocet00\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --ffmpeg-ffmpeg=C:\\APP\\Streamlink\\ffmpeg\\ffmpeg.exe\r\n[cli][info] Found matching plugin pandalive for URL https://www.pandalive.co.kr/live/play/pocet00\r\n[plugins.pandalive][debug] Media code: pocet00\r\nerror: Unable to open URL: https://api.pandalive.co.kr/v1/live/play (400 Client Error: Bad Request for url: https://api.pandalive.co.kr/v1/live/play)\n```\n\n", "before_files": [{"content": "\"\"\"\n$description South Korean live-streaming platform for individual live streams.\n$url pandalive.co.kr\n$type live\n$metadata author\n$metadata title\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?pandalive\\.co\\.kr/\",\n))\nclass Pandalive(Plugin):\n def _get_streams(self):\n media_code = self.session.http.get(self.url, 
schema=validate.Schema(\n re.compile(r\"\"\"routePath:\\s*(?P<q>[\"'])(\\\\u002F|/)live(\\\\u002F|/)play(\\\\u002F|/)(?P<id>.+?)(?P=q)\"\"\"),\n validate.any(None, validate.get(\"id\")),\n ))\n\n if not media_code:\n return\n\n log.debug(f\"Media code: {media_code}\")\n\n json = self.session.http.post(\n \"https://api.pandalive.co.kr/v1/live/play\",\n data={\n \"action\": \"watch\",\n \"userId\": media_code,\n },\n schema=validate.Schema(\n validate.parse_json(),\n validate.any(\n {\n \"media\": {\n \"title\": str,\n \"userId\": str,\n \"userNick\": str,\n \"isPw\": bool,\n \"isLive\": bool,\n \"liveType\": str,\n },\n \"PlayList\": {\n validate.optional(\"hls\"): [{\n \"url\": validate.url(),\n }],\n validate.optional(\"hls2\"): [{\n \"url\": validate.url(),\n }],\n validate.optional(\"hls3\"): [{\n \"url\": validate.url(),\n }],\n },\n \"result\": bool,\n \"message\": str,\n },\n {\n \"result\": bool,\n \"message\": str,\n },\n ),\n ),\n )\n\n if not json[\"result\"]:\n log.error(json[\"message\"])\n return\n\n if not json[\"media\"][\"isLive\"]:\n log.error(\"The broadcast has ended\")\n return\n\n if json[\"media\"][\"isPw\"]:\n log.error(\"The broadcast is password protected\")\n return\n\n log.info(f\"Broadcast type: {json['media']['liveType']}\")\n\n self.author = f\"{json['media']['userNick']} ({json['media']['userId']})\"\n self.title = f\"{json['media']['title']}\"\n\n playlist = json[\"PlayList\"]\n for key in (\"hls\", \"hls2\", \"hls3\"):\n # use the first available HLS stream\n if key in playlist and playlist[key]:\n # all stream qualities share the same URL, so just use the first one\n return HLSStream.parse_variant_playlist(self.session, playlist[key][0][\"url\"])\n\n\n__plugin__ = Pandalive\n", "path": "src/streamlink/plugins/pandalive.py"}]}
2,102
224
gh_patches_debug_7999
rasdani/github-patches
git_diff
feast-dev__feast-4116
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: UserWarning when passing `FeatureService` object to both `apply()` and `get_online_features()` - Caused by difference in `FeatureViewProjection.from_proto()` and `FeatureViewProjection.from_definition()` ## Context If a `FeatureService` object is created and is being passed to both the `apply()` and the `get_online_features()` method, the following user warning is thrown: ``` UserWarning: The FeatureService object that has been passed in as an argument is inconsistent with the version from the registry. Potentially a newer version of the FeatureService has been applied to the registry. ``` This is caused by a bug in the creation/retrieval of `feature_view_projections`, which is an attribute of `FeatureService`. An empty string is set to the `name_alias` value of `FeatureViewProjection` when calling [`from_proto`](https://github.com/feast-dev/feast/blob/master/sdk/python/feast/feature_view_projection.py#L52-L64). However, when creating the `FeatureViewProjection` by creating a `FeatureService` object (using the default value) `name_alias` is set to `None`, because it is created with [`FeatureViewProjection.from_definition()`](https://github.com/feast-dev/feast/blob/master/sdk/python/feast/feature_view_projection.py#L66-L73). The warning is raised [here](https://github.com/feast-dev/feast/blob/master/sdk/python/feast/feature_store.py#L532-L541), because the `feature_service_from_registry` has the empty string value for the underlying `FeatureViewProjection.name_alias` object, while the for `_features` the underlying `FeatureViewProjection.name_alias` is valued `None`. ## Expected Behavior I would expect that if a `FeatureViewProjection` encapsulated within a `FeatureService` has a `None` value for the `name_alias` attribute when being stored in the feature store, that it would also load with the same `None` value when calling `FeatureViewProjection.from_proto()`. Then the comparison should also not fail, and the warning should not be thrown. ## Current Behavior The `FeatureViewProjection` is loaded with an empty string for the value `name_alias` when `FeatureViewProjection.from_proto()` is called. This causes the. comparison to fail, and the warning to be thrown. 
## Steps to reproduce docker-compose.yml ``` --- version: "3" services: db: restart: always image: postgres:16-alpine container_name: feast_db ports: - "5432:5432" environment: - POSTGRES_DB=feature_store - POSTGRES_USER=postgres - POSTGRES_PASSWORD=test ``` feature_store.yaml ``` project: project_name provider: local registry: registry_type: sql path: postgresql://postgres:[email protected]:5432/feature_store ``` Python script ``` from datetime import timedelta import pandas as pd from feast import ( Entity, FeatureService, FeatureStore, FeatureView, Field, SnowflakeSource, ValueType, ) from feast.types import Float32 feature_store = FeatureStore() # Apply entity = Entity( name="entity", join_keys=["ID"], value_type=ValueType.STRING, ) source = SnowflakeSource( name="source_snowflake", timestamp_field="EVENT_TIMESTAMP", schema="TEMP", table="FEAST_FEATURES", ) feature_view = FeatureView( name="feature_view__v1", entities=[entity], ttl=timedelta(days=0), schema=[ Field(name="FEATURE", dtype=Float32), ], online=True, source=source, ) feature_service = FeatureService( name="feature_service__v1", features=[feature_view], ) feature_store.apply( [ entity, source, feature_view, feature_service, ] ) # Inference entity_rows = [{"ID": "ID1"}, {"ID": "ID2"}] entity_df = pd.DataFrame(entity_rows) entity_df["event_timestamp"] = pd.to_datetime("now", utc=True) online_features = feature_store.get_online_features( entity_rows=entity_rows, features=feature_service, ).to_dict() print(online_features) ``` ### Specifications Using postgres registry. - Version: 0.36.0 - Platform: MacOS - M1 - Subsystem: Sonoma 14.1.1 ## Possible Solution I have little to no experience with proto, so I'm not sure whether it would be possible to load the None value directly from the proto definition. One potential solution would be to check whether an empty string is loaded for this field when loading from proto, and then set it to None in the `from_proto` function. </issue> <code> [start of sdk/python/feast/feature_view_projection.py] 1 from typing import TYPE_CHECKING, Dict, List, Optional 2 3 from attr import dataclass 4 5 from feast.field import Field 6 from feast.protos.feast.core.FeatureViewProjection_pb2 import ( 7 FeatureViewProjection as FeatureViewProjectionProto, 8 ) 9 10 if TYPE_CHECKING: 11 from feast.base_feature_view import BaseFeatureView 12 13 14 @dataclass 15 class FeatureViewProjection: 16 """ 17 A feature view projection represents a selection of one or more features from a 18 single feature view. 19 20 Attributes: 21 name: The unique name of the feature view from which this projection is created. 22 name_alias: An optional alias for the name. 23 features: The list of features represented by the feature view projection. 24 desired_features: The list of features that this feature view projection intends to select. 25 If empty, the projection intends to select all features. This attribute is only used 26 for feature service inference. It should only be set if the underlying feature view 27 is not ready to be projected, i.e. still needs to go through feature inference. 28 join_key_map: A map to modify join key columns during retrieval of this feature 29 view projection. 
30 """ 31 32 name: str 33 name_alias: Optional[str] 34 desired_features: List[str] 35 features: List[Field] 36 join_key_map: Dict[str, str] = {} 37 38 def name_to_use(self): 39 return self.name_alias or self.name 40 41 def to_proto(self) -> FeatureViewProjectionProto: 42 feature_reference_proto = FeatureViewProjectionProto( 43 feature_view_name=self.name, 44 feature_view_name_alias=self.name_alias or "", 45 join_key_map=self.join_key_map, 46 ) 47 for feature in self.features: 48 feature_reference_proto.feature_columns.append(feature.to_proto()) 49 50 return feature_reference_proto 51 52 @staticmethod 53 def from_proto(proto: FeatureViewProjectionProto): 54 feature_view_projection = FeatureViewProjection( 55 name=proto.feature_view_name, 56 name_alias=proto.feature_view_name_alias, 57 features=[], 58 join_key_map=dict(proto.join_key_map), 59 desired_features=[], 60 ) 61 for feature_column in proto.feature_columns: 62 feature_view_projection.features.append(Field.from_proto(feature_column)) 63 64 return feature_view_projection 65 66 @staticmethod 67 def from_definition(base_feature_view: "BaseFeatureView"): 68 return FeatureViewProjection( 69 name=base_feature_view.name, 70 name_alias=None, 71 features=base_feature_view.features, 72 desired_features=[], 73 ) 74 75 def get_feature(self, feature_name: str) -> Field: 76 try: 77 return next(field for field in self.features if field.name == feature_name) 78 except StopIteration: 79 raise KeyError( 80 f"Feature {feature_name} not found in projection {self.name_to_use()}" 81 ) 82 [end of sdk/python/feast/feature_view_projection.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sdk/python/feast/feature_view_projection.py b/sdk/python/feast/feature_view_projection.py --- a/sdk/python/feast/feature_view_projection.py +++ b/sdk/python/feast/feature_view_projection.py @@ -53,7 +53,7 @@ def from_proto(proto: FeatureViewProjectionProto): feature_view_projection = FeatureViewProjection( name=proto.feature_view_name, - name_alias=proto.feature_view_name_alias, + name_alias=proto.feature_view_name_alias or None, features=[], join_key_map=dict(proto.join_key_map), desired_features=[],
{"golden_diff": "diff --git a/sdk/python/feast/feature_view_projection.py b/sdk/python/feast/feature_view_projection.py\n--- a/sdk/python/feast/feature_view_projection.py\n+++ b/sdk/python/feast/feature_view_projection.py\n@@ -53,7 +53,7 @@\n def from_proto(proto: FeatureViewProjectionProto):\n feature_view_projection = FeatureViewProjection(\n name=proto.feature_view_name,\n- name_alias=proto.feature_view_name_alias,\n+ name_alias=proto.feature_view_name_alias or None,\n features=[],\n join_key_map=dict(proto.join_key_map),\n desired_features=[],\n", "issue": "Bug: UserWarning when passing `FeatureService` object to both `apply()` and `get_online_features()` - Caused by difference in `FeatureViewProjection.from_proto()` and `FeatureViewProjection.from_definition()`\n## Context\r\n\r\nIf a `FeatureService` object is created and is being passed to both the `apply()` and the `get_online_features()` method, the following user warning is thrown: \r\n\r\n```\r\nUserWarning: The FeatureService object that has been passed in as an argument is inconsistent with the version from the registry. Potentially a newer version of the FeatureService has been applied to the registry.\r\n```\r\n\r\nThis is caused by a bug in the creation/retrieval of `feature_view_projections`, which is an attribute of `FeatureService`. An empty string is set to the `name_alias` value of `FeatureViewProjection` when calling [`from_proto`](https://github.com/feast-dev/feast/blob/master/sdk/python/feast/feature_view_projection.py#L52-L64). However, when creating the `FeatureViewProjection` by creating a `FeatureService` object (using the default value) `name_alias` is set to `None`, because it is created with [`FeatureViewProjection.from_definition()`](https://github.com/feast-dev/feast/blob/master/sdk/python/feast/feature_view_projection.py#L66-L73).\r\n\r\nThe warning is raised [here](https://github.com/feast-dev/feast/blob/master/sdk/python/feast/feature_store.py#L532-L541), because the `feature_service_from_registry` has the empty string value for the underlying `FeatureViewProjection.name_alias` object, while the for `_features` the underlying `FeatureViewProjection.name_alias` is valued `None`.\r\n\r\n## Expected Behavior\r\n\r\nI would expect that if a `FeatureViewProjection` encapsulated within a `FeatureService` has a `None` value for the `name_alias` attribute when being stored in the feature store, that it would also load with the same `None` value when calling `FeatureViewProjection.from_proto()`.\r\n\r\nThen the comparison should also not fail, and the warning should not be thrown.\r\n\r\n## Current Behavior\r\n\r\nThe `FeatureViewProjection` is loaded with an empty string for the value `name_alias` when `FeatureViewProjection.from_proto()` is called. \r\n\r\nThis causes the. comparison to fail, and the warning to be thrown. 
\r\n\r\n## Steps to reproduce\r\n\r\ndocker-compose.yml\r\n```\r\n---\r\nversion: \"3\"\r\nservices:\r\n db:\r\n restart: always\r\n image: postgres:16-alpine\r\n container_name: feast_db\r\n ports:\r\n - \"5432:5432\"\r\n environment:\r\n - POSTGRES_DB=feature_store\r\n - POSTGRES_USER=postgres\r\n - POSTGRES_PASSWORD=test\r\n```\r\n\r\nfeature_store.yaml\r\n```\r\nproject: project_name\r\nprovider: local\r\nregistry:\r\n registry_type: sql\r\n path: postgresql://postgres:[email protected]:5432/feature_store\r\n```\r\n\r\nPython script\r\n```\r\nfrom datetime import timedelta\r\n\r\nimport pandas as pd\r\nfrom feast import (\r\n Entity,\r\n FeatureService,\r\n FeatureStore,\r\n FeatureView,\r\n Field,\r\n SnowflakeSource,\r\n ValueType,\r\n)\r\nfrom feast.types import Float32\r\n\r\nfeature_store = FeatureStore()\r\n\r\n# Apply\r\nentity = Entity(\r\n name=\"entity\",\r\n join_keys=[\"ID\"],\r\n value_type=ValueType.STRING,\r\n)\r\n\r\nsource = SnowflakeSource(\r\n name=\"source_snowflake\",\r\n timestamp_field=\"EVENT_TIMESTAMP\",\r\n schema=\"TEMP\",\r\n table=\"FEAST_FEATURES\",\r\n)\r\n\r\nfeature_view = FeatureView(\r\n name=\"feature_view__v1\",\r\n entities=[entity],\r\n ttl=timedelta(days=0),\r\n schema=[\r\n Field(name=\"FEATURE\", dtype=Float32),\r\n ],\r\n online=True,\r\n source=source,\r\n)\r\n\r\nfeature_service = FeatureService(\r\n name=\"feature_service__v1\",\r\n features=[feature_view],\r\n)\r\n\r\nfeature_store.apply(\r\n [\r\n entity,\r\n source,\r\n feature_view,\r\n feature_service,\r\n ]\r\n)\r\n\r\n# Inference\r\nentity_rows = [{\"ID\": \"ID1\"}, {\"ID\": \"ID2\"}]\r\nentity_df = pd.DataFrame(entity_rows)\r\nentity_df[\"event_timestamp\"] = pd.to_datetime(\"now\", utc=True)\r\n\r\nonline_features = feature_store.get_online_features(\r\n entity_rows=entity_rows,\r\n features=feature_service,\r\n).to_dict()\r\n\r\nprint(online_features)\r\n```\r\n\r\n### Specifications\r\n\r\nUsing postgres registry. \r\n\r\n- Version: 0.36.0\r\n- Platform: MacOS - M1\r\n- Subsystem: Sonoma 14.1.1\r\n\r\n## Possible Solution\r\n\r\nI have little to no experience with proto, so I'm not sure whether it would be possible to load the None value directly from the proto definition.\r\n\r\nOne potential solution would be to check whether an empty string is loaded for this field when loading from proto, and then set it to None in the `from_proto` function. \r\n\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Dict, List, Optional\n\nfrom attr import dataclass\n\nfrom feast.field import Field\nfrom feast.protos.feast.core.FeatureViewProjection_pb2 import (\n FeatureViewProjection as FeatureViewProjectionProto,\n)\n\nif TYPE_CHECKING:\n from feast.base_feature_view import BaseFeatureView\n\n\n@dataclass\nclass FeatureViewProjection:\n \"\"\"\n A feature view projection represents a selection of one or more features from a\n single feature view.\n\n Attributes:\n name: The unique name of the feature view from which this projection is created.\n name_alias: An optional alias for the name.\n features: The list of features represented by the feature view projection.\n desired_features: The list of features that this feature view projection intends to select.\n If empty, the projection intends to select all features. This attribute is only used\n for feature service inference. It should only be set if the underlying feature view\n is not ready to be projected, i.e. 
still needs to go through feature inference.\n join_key_map: A map to modify join key columns during retrieval of this feature\n view projection.\n \"\"\"\n\n name: str\n name_alias: Optional[str]\n desired_features: List[str]\n features: List[Field]\n join_key_map: Dict[str, str] = {}\n\n def name_to_use(self):\n return self.name_alias or self.name\n\n def to_proto(self) -> FeatureViewProjectionProto:\n feature_reference_proto = FeatureViewProjectionProto(\n feature_view_name=self.name,\n feature_view_name_alias=self.name_alias or \"\",\n join_key_map=self.join_key_map,\n )\n for feature in self.features:\n feature_reference_proto.feature_columns.append(feature.to_proto())\n\n return feature_reference_proto\n\n @staticmethod\n def from_proto(proto: FeatureViewProjectionProto):\n feature_view_projection = FeatureViewProjection(\n name=proto.feature_view_name,\n name_alias=proto.feature_view_name_alias,\n features=[],\n join_key_map=dict(proto.join_key_map),\n desired_features=[],\n )\n for feature_column in proto.feature_columns:\n feature_view_projection.features.append(Field.from_proto(feature_column))\n\n return feature_view_projection\n\n @staticmethod\n def from_definition(base_feature_view: \"BaseFeatureView\"):\n return FeatureViewProjection(\n name=base_feature_view.name,\n name_alias=None,\n features=base_feature_view.features,\n desired_features=[],\n )\n\n def get_feature(self, feature_name: str) -> Field:\n try:\n return next(field for field in self.features if field.name == feature_name)\n except StopIteration:\n raise KeyError(\n f\"Feature {feature_name} not found in projection {self.name_to_use()}\"\n )\n", "path": "sdk/python/feast/feature_view_projection.py"}]}
2,345
135
gh_patches_debug_41518
rasdani/github-patches
git_diff
lutris__lutris-2373
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Nine: Lutris can't find Nine libraries A lot of changed before creating PR #2092 and merging it - biggest issue is commit 792c22176eff9e063b22d7b9700e2e9b79a11fae, which changes return val of iter_lib_folders() from lists to strings. I believe I used iter_lib_folders() in #2092 because I needed distinguish between lib32 and lib64 paths. I will take a look at this and try to fix it ASAP (this week). Original report: https://github.com/lutris/lutris/pull/2092#issuecomment-529362315 </issue> <code> [start of lutris/util/wine/nine.py] 1 """Gallium Nine helper module""" 2 import os 3 import shutil 4 5 from lutris.util import system 6 from lutris.runners.commands.wine import wineexec 7 8 class NineUnavailable(RuntimeError): 9 """Exception raised when Gallium Nine is not available""" 10 11 class NineManager: 12 """Utility class to install and manage Gallium Nine to a Wine prefix""" 13 14 nine_files = ("d3d9-nine.dll", "ninewinecfg.exe") 15 mesa_files = ("d3dadapter9.so.1",) 16 17 def __init__(self, prefix, arch): 18 self.prefix = prefix 19 self.wine_arch = arch 20 21 @staticmethod 22 def nine_is_supported(): 23 """Check if MESA is built with Gallium Nine state tracker support 24 25 basic check for presence of d3dadapter9 library in 'd3d' subdirectory 26 of system library directory 27 """ 28 for mesa_file in NineManager.mesa_files: 29 if not any([os.path.exists(os.path.join(lib[0], "d3d", mesa_file)) 30 for lib in system.LINUX_SYSTEM.iter_lib_folders()]): 31 return False 32 33 if system.LINUX_SYSTEM.is_64_bit: 34 if not any([os.path.exists(os.path.join(lib[1], "d3d", mesa_file)) 35 for lib in system.LINUX_SYSTEM.iter_lib_folders()]): 36 return False 37 38 return True 39 40 @staticmethod 41 def nine_is_installed(): 42 """Check if Gallium Nine standalone is installed on this system 43 44 check 'wine/fakedlls' subdirectory of system library directory for Nine binaries 45 """ 46 for nine_file in NineManager.nine_files: 47 if not any([os.path.exists(os.path.join(lib[0], "wine/fakedlls", nine_file)) 48 for lib in system.LINUX_SYSTEM.iter_lib_folders()]): 49 return False 50 51 if system.LINUX_SYSTEM.is_64_bit: 52 if not any([os.path.exists(os.path.join(lib[1], "wine/fakedlls", nine_file)) 53 for lib in system.LINUX_SYSTEM.iter_lib_folders()]): 54 return False 55 56 return True 57 58 @staticmethod 59 def is_available(): 60 """Check if Gallium Nine can be enabled on this system""" 61 return NineManager.nine_is_supported() and NineManager.nine_is_installed() 62 63 def get_system_path(self, arch): 64 """Return path of Windows system directory with binaries of chosen architecture""" 65 windows_path = os.path.join(self.prefix, "drive_c/windows") 66 67 if self.wine_arch == "win32" and arch == "x32": 68 return os.path.join(windows_path, "system32") 69 if self.wine_arch == "win64" and arch == "x32": 70 return os.path.join(windows_path, "syswow64") 71 if self.wine_arch == "win64" and arch == "x64": 72 return os.path.join(windows_path, "system32") 73 74 return None 75 76 def is_prefix_prepared(self): 77 if not all(system.path_exists(os.path.join(self.get_system_path("x32"), nine_file)) 78 for nine_file in self.nine_files): 79 return False 80 81 if self.wine_arch == "win64": 82 if not all(system.path_exists(os.path.join(self.get_system_path("x64"), nine_file)) 83 for nine_file in self.nine_files): 84 return False 85 86 return True 87 88 def prepare_prefix(self): 89 for nine_file in NineManager.nine_files: 90 
for lib in system.LINUX_SYSTEM.iter_lib_folders(): 91 nine_file_32 = os.path.join(lib[0], "wine/fakedlls", nine_file) 92 if os.path.exists(nine_file_32): 93 shutil.copy(nine_file_32, self.get_system_path("x32")) 94 95 if self.wine_arch == "win64": 96 nine_file_64 = os.path.join(lib[1], "wine/fakedlls", nine_file) 97 if os.path.exists(nine_file_64): 98 shutil.copy(nine_file_64, self.get_system_path("x64")) 99 100 def enable(self): 101 if not self.nine_is_supported(): 102 raise NineUnavailable("Nine is not supported on this system") 103 if not self.nine_is_installed(): 104 raise NineUnavailable("Nine Standalone is not installed") 105 if not self.is_prefix_prepared(): 106 self.prepare_prefix() 107 108 wineexec( 109 "ninewinecfg", 110 args="-e", 111 prefix=self.prefix, 112 blocking=True, 113 ) 114 115 def disable(self): 116 if self.is_prefix_prepared(): 117 wineexec( 118 "ninewinecfg", 119 args="-d", 120 prefix=self.prefix, 121 blocking=True, 122 ) 123 [end of lutris/util/wine/nine.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lutris/util/wine/nine.py b/lutris/util/wine/nine.py --- a/lutris/util/wine/nine.py +++ b/lutris/util/wine/nine.py @@ -4,6 +4,7 @@ from lutris.util import system from lutris.runners.commands.wine import wineexec +from lutris.util.wine.cabinstall import CabInstaller class NineUnavailable(RuntimeError): """Exception raised when Gallium Nine is not available""" @@ -26,15 +27,10 @@ of system library directory """ for mesa_file in NineManager.mesa_files: - if not any([os.path.exists(os.path.join(lib[0], "d3d", mesa_file)) + if not any([os.path.exists(os.path.join(lib, "d3d", mesa_file)) for lib in system.LINUX_SYSTEM.iter_lib_folders()]): return False - if system.LINUX_SYSTEM.is_64_bit: - if not any([os.path.exists(os.path.join(lib[1], "d3d", mesa_file)) - for lib in system.LINUX_SYSTEM.iter_lib_folders()]): - return False - return True @staticmethod @@ -44,15 +40,10 @@ check 'wine/fakedlls' subdirectory of system library directory for Nine binaries """ for nine_file in NineManager.nine_files: - if not any([os.path.exists(os.path.join(lib[0], "wine/fakedlls", nine_file)) + if not any([os.path.exists(os.path.join(lib, "wine/fakedlls", nine_file)) for lib in system.LINUX_SYSTEM.iter_lib_folders()]): return False - if system.LINUX_SYSTEM.is_64_bit: - if not any([os.path.exists(os.path.join(lib[1], "wine/fakedlls", nine_file)) - for lib in system.LINUX_SYSTEM.iter_lib_folders()]): - return False - return True @staticmethod @@ -88,15 +79,24 @@ def prepare_prefix(self): for nine_file in NineManager.nine_files: for lib in system.LINUX_SYSTEM.iter_lib_folders(): - nine_file_32 = os.path.join(lib[0], "wine/fakedlls", nine_file) - if os.path.exists(nine_file_32): - shutil.copy(nine_file_32, self.get_system_path("x32")) + nine_file_path = os.path.join(lib, "wine/fakedlls", nine_file) + + if (os.path.exists(nine_file_path) and + CabInstaller.get_arch_from_dll(nine_file_path) == "win32"): + shutil.copy(nine_file_path, self.get_system_path("x32")) if self.wine_arch == "win64": - nine_file_64 = os.path.join(lib[1], "wine/fakedlls", nine_file) - if os.path.exists(nine_file_64): + if (os.path.exists(nine_file_path) and + CabInstaller.get_arch_from_dll(nine_file_path) == "win64"): shutil.copy(nine_file_64, self.get_system_path("x64")) + if not os.path.exists(os.path.join(self.get_system_path("x32"), nine_file)): + raise NineUnavailable("could not install " + nine_file + " (x32)") + + if self.wine_arch == "win64": + if not os.path.exists(os.path.join(self.get_system_path("x64"), nine_file)): + raise NineUnavailable("could not install " + nine_file + " (x64)") + def enable(self): if not self.nine_is_supported(): raise NineUnavailable("Nine is not supported on this system")
{"golden_diff": "diff --git a/lutris/util/wine/nine.py b/lutris/util/wine/nine.py\n--- a/lutris/util/wine/nine.py\n+++ b/lutris/util/wine/nine.py\n@@ -4,6 +4,7 @@\n \n from lutris.util import system\n from lutris.runners.commands.wine import wineexec\n+from lutris.util.wine.cabinstall import CabInstaller\n \n class NineUnavailable(RuntimeError):\n \"\"\"Exception raised when Gallium Nine is not available\"\"\"\n@@ -26,15 +27,10 @@\n of system library directory\n \"\"\"\n for mesa_file in NineManager.mesa_files:\n- if not any([os.path.exists(os.path.join(lib[0], \"d3d\", mesa_file))\n+ if not any([os.path.exists(os.path.join(lib, \"d3d\", mesa_file))\n for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n return False\n \n- if system.LINUX_SYSTEM.is_64_bit:\n- if not any([os.path.exists(os.path.join(lib[1], \"d3d\", mesa_file))\n- for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n- return False\n-\n return True\n \n @staticmethod\n@@ -44,15 +40,10 @@\n check 'wine/fakedlls' subdirectory of system library directory for Nine binaries\n \"\"\"\n for nine_file in NineManager.nine_files:\n- if not any([os.path.exists(os.path.join(lib[0], \"wine/fakedlls\", nine_file))\n+ if not any([os.path.exists(os.path.join(lib, \"wine/fakedlls\", nine_file))\n for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n return False\n \n- if system.LINUX_SYSTEM.is_64_bit:\n- if not any([os.path.exists(os.path.join(lib[1], \"wine/fakedlls\", nine_file))\n- for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n- return False\n-\n return True\n \n @staticmethod\n@@ -88,15 +79,24 @@\n def prepare_prefix(self):\n for nine_file in NineManager.nine_files:\n for lib in system.LINUX_SYSTEM.iter_lib_folders():\n- nine_file_32 = os.path.join(lib[0], \"wine/fakedlls\", nine_file)\n- if os.path.exists(nine_file_32):\n- shutil.copy(nine_file_32, self.get_system_path(\"x32\"))\n+ nine_file_path = os.path.join(lib, \"wine/fakedlls\", nine_file)\n+\n+ if (os.path.exists(nine_file_path) and\n+ CabInstaller.get_arch_from_dll(nine_file_path) == \"win32\"):\n+ shutil.copy(nine_file_path, self.get_system_path(\"x32\"))\n \n if self.wine_arch == \"win64\":\n- nine_file_64 = os.path.join(lib[1], \"wine/fakedlls\", nine_file)\n- if os.path.exists(nine_file_64):\n+ if (os.path.exists(nine_file_path) and\n+ CabInstaller.get_arch_from_dll(nine_file_path) == \"win64\"):\n shutil.copy(nine_file_64, self.get_system_path(\"x64\"))\n \n+ if not os.path.exists(os.path.join(self.get_system_path(\"x32\"), nine_file)):\n+ raise NineUnavailable(\"could not install \" + nine_file + \" (x32)\")\n+\n+ if self.wine_arch == \"win64\":\n+ if not os.path.exists(os.path.join(self.get_system_path(\"x64\"), nine_file)):\n+ raise NineUnavailable(\"could not install \" + nine_file + \" (x64)\")\n+\n def enable(self):\n if not self.nine_is_supported():\n raise NineUnavailable(\"Nine is not supported on this system\")\n", "issue": "Nine: Lutris can't find Nine libraries\nA lot of changed before creating PR #2092 and merging it - biggest issue is commit 792c22176eff9e063b22d7b9700e2e9b79a11fae, which changes return val of iter_lib_folders() from lists to strings. I believe I used iter_lib_folders() in #2092 because I needed distinguish between lib32 and lib64 paths. 
I will take a look at this and try to fix it ASAP (this week).\r\n\r\nOriginal report:\r\nhttps://github.com/lutris/lutris/pull/2092#issuecomment-529362315\n", "before_files": [{"content": "\"\"\"Gallium Nine helper module\"\"\"\nimport os\nimport shutil\n\nfrom lutris.util import system\nfrom lutris.runners.commands.wine import wineexec\n\nclass NineUnavailable(RuntimeError):\n \"\"\"Exception raised when Gallium Nine is not available\"\"\"\n\nclass NineManager:\n \"\"\"Utility class to install and manage Gallium Nine to a Wine prefix\"\"\"\n\n nine_files = (\"d3d9-nine.dll\", \"ninewinecfg.exe\")\n mesa_files = (\"d3dadapter9.so.1\",)\n\n def __init__(self, prefix, arch):\n self.prefix = prefix\n self.wine_arch = arch\n\n @staticmethod\n def nine_is_supported():\n \"\"\"Check if MESA is built with Gallium Nine state tracker support\n\n basic check for presence of d3dadapter9 library in 'd3d' subdirectory\n of system library directory\n \"\"\"\n for mesa_file in NineManager.mesa_files:\n if not any([os.path.exists(os.path.join(lib[0], \"d3d\", mesa_file))\n for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n return False\n\n if system.LINUX_SYSTEM.is_64_bit:\n if not any([os.path.exists(os.path.join(lib[1], \"d3d\", mesa_file))\n for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n return False\n\n return True\n\n @staticmethod\n def nine_is_installed():\n \"\"\"Check if Gallium Nine standalone is installed on this system\n\n check 'wine/fakedlls' subdirectory of system library directory for Nine binaries\n \"\"\"\n for nine_file in NineManager.nine_files:\n if not any([os.path.exists(os.path.join(lib[0], \"wine/fakedlls\", nine_file))\n for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n return False\n\n if system.LINUX_SYSTEM.is_64_bit:\n if not any([os.path.exists(os.path.join(lib[1], \"wine/fakedlls\", nine_file))\n for lib in system.LINUX_SYSTEM.iter_lib_folders()]):\n return False\n\n return True\n\n @staticmethod\n def is_available():\n \"\"\"Check if Gallium Nine can be enabled on this system\"\"\"\n return NineManager.nine_is_supported() and NineManager.nine_is_installed()\n\n def get_system_path(self, arch):\n \"\"\"Return path of Windows system directory with binaries of chosen architecture\"\"\"\n windows_path = os.path.join(self.prefix, \"drive_c/windows\")\n\n if self.wine_arch == \"win32\" and arch == \"x32\":\n return os.path.join(windows_path, \"system32\")\n if self.wine_arch == \"win64\" and arch == \"x32\":\n return os.path.join(windows_path, \"syswow64\")\n if self.wine_arch == \"win64\" and arch == \"x64\":\n return os.path.join(windows_path, \"system32\")\n\n return None\n\n def is_prefix_prepared(self):\n if not all(system.path_exists(os.path.join(self.get_system_path(\"x32\"), nine_file))\n for nine_file in self.nine_files):\n return False\n\n if self.wine_arch == \"win64\":\n if not all(system.path_exists(os.path.join(self.get_system_path(\"x64\"), nine_file))\n for nine_file in self.nine_files):\n return False\n\n return True\n\n def prepare_prefix(self):\n for nine_file in NineManager.nine_files:\n for lib in system.LINUX_SYSTEM.iter_lib_folders():\n nine_file_32 = os.path.join(lib[0], \"wine/fakedlls\", nine_file)\n if os.path.exists(nine_file_32):\n shutil.copy(nine_file_32, self.get_system_path(\"x32\"))\n\n if self.wine_arch == \"win64\":\n nine_file_64 = os.path.join(lib[1], \"wine/fakedlls\", nine_file)\n if os.path.exists(nine_file_64):\n shutil.copy(nine_file_64, self.get_system_path(\"x64\"))\n\n def enable(self):\n if not 
self.nine_is_supported():\n raise NineUnavailable(\"Nine is not supported on this system\")\n if not self.nine_is_installed():\n raise NineUnavailable(\"Nine Standalone is not installed\")\n if not self.is_prefix_prepared():\n self.prepare_prefix()\n\n wineexec(\n \"ninewinecfg\",\n args=\"-e\",\n prefix=self.prefix,\n blocking=True,\n )\n\n def disable(self):\n if self.is_prefix_prepared():\n wineexec(\n \"ninewinecfg\",\n args=\"-d\",\n prefix=self.prefix,\n blocking=True,\n )\n", "path": "lutris/util/wine/nine.py"}]}
1,999
854
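A minimal sketch of the corrected library check described in the record above, assuming `iter_lib_folders()` now yields plain directory-path strings as the diff shows; `lib_folders` is a stand-in argument rather than the real `system.LINUX_SYSTEM` call, and the usage line at the end is purely illustrative.

```python
import os

def nine_is_supported(lib_folders, mesa_files=("d3dadapter9.so.1",)):
    # Return True only if every Mesa Nine library is found under some
    # "<lib>/d3d" directory; lib_folders is an iterable of path strings.
    for mesa_file in mesa_files:
        if not any(
            os.path.exists(os.path.join(lib, "d3d", mesa_file))
            for lib in lib_folders
        ):
            return False
    return True

# Hypothetical usage with example library directories:
print(nine_is_supported(["/usr/lib", "/usr/lib64"]))
```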
gh_patches_debug_39823
rasdani/github-patches
git_diff
rasterio__rasterio-971
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Can't unset crs metadata values Using the cli: ```bash # crs is defined $ rio info tests/data/alpha.tif | jq '.crs' "EPSG:3857" # attempt to unset crs $ rio edit-info tests/data/alpha.tif --crs "" # but $ rio info tests/data/alpha.tif | jq '.crs' "EPSG:3857" ``` Per @perrygeo - the python version of this is simple: ```python import rasterio from rasterio.crs import CRS raster = 'test/prep_test/de_hamburg_custom_srs/fixtures/dop20c_32587_5922.tif' with rasterio.open(raster, 'r+') as src: src.crs = CRS() ``` Reminds me of https://github.com/mapbox/rasterio/issues/328. </issue> <code> [start of rasterio/rio/edit_info.py] 1 """Fetch and edit raster dataset metadata from the command line.""" 2 3 4 import json 5 6 import click 7 8 import rasterio 9 import rasterio.crs 10 from rasterio.compat import string_types 11 from rasterio.crs import CRS 12 from rasterio.errors import CRSError 13 from rasterio.rio import options 14 from rasterio.transform import guard_transform 15 16 17 # Handlers for info module options. 18 19 def all_handler(ctx, param, value): 20 """Get tags from a template file or command line.""" 21 if ctx.obj and ctx.obj.get('like') and value is not None: 22 ctx.obj['all_like'] = value 23 value = ctx.obj.get('like') 24 return value 25 26 27 def crs_handler(ctx, param, value): 28 """Get crs value from a template file or command line.""" 29 retval = options.from_like_context(ctx, param, value) 30 if retval is None and value: 31 try: 32 retval = json.loads(value) 33 except ValueError: 34 retval = value 35 try: 36 if isinstance(retval, dict): 37 retval = CRS(retval) 38 else: 39 retval = CRS.from_string(retval) 40 except CRSError: 41 raise click.BadParameter( 42 "'%s' is not a recognized CRS." % retval, 43 param=param, param_hint='crs') 44 return retval 45 46 47 def tags_handler(ctx, param, value): 48 """Get tags from a template file or command line.""" 49 retval = options.from_like_context(ctx, param, value) 50 if retval is None and value: 51 try: 52 retval = dict(p.split('=') for p in value) 53 except: 54 raise click.BadParameter( 55 "'%s' contains a malformed tag." % value, 56 param=param, param_hint='transform') 57 return retval 58 59 60 def transform_handler(ctx, param, value): 61 """Get transform value from a template file or command line.""" 62 retval = options.from_like_context(ctx, param, value) 63 if retval is None and value: 64 try: 65 value = json.loads(value) 66 except ValueError: 67 pass 68 try: 69 retval = guard_transform(value) 70 except: 71 raise click.BadParameter( 72 "'%s' is not recognized as an Affine array." 
% value, 73 param=param, param_hint='transform') 74 return retval 75 76 77 @click.command('edit-info', short_help="Edit dataset metadata.") 78 @options.file_in_arg 79 @options.bidx_opt 80 @options.edit_nodata_opt 81 @click.option('--unset-nodata', default=False, is_flag=True, 82 help="Unset the dataset's nodata value.") 83 @click.option('--crs', callback=crs_handler, default=None, 84 help="New coordinate reference system") 85 @click.option('--transform', callback=transform_handler, 86 help="New affine transform matrix") 87 @click.option('--units', help="Edit units of a band (requires --bidx)") 88 @click.option('--description', 89 help="Edit description of a band (requires --bidx)") 90 @click.option('--tag', 'tags', callback=tags_handler, multiple=True, 91 metavar='KEY=VAL', help="New tag.") 92 @click.option('--all', 'allmd', callback=all_handler, flag_value='like', 93 is_eager=True, default=False, 94 help="Copy all metadata items from the template file.") 95 @options.like_opt 96 @click.pass_context 97 def edit(ctx, input, bidx, nodata, unset_nodata, crs, transform, units, 98 description, tags, allmd, like): 99 """Edit a dataset's metadata: coordinate reference system, affine 100 transformation matrix, nodata value, and tags. 101 102 The coordinate reference system may be either a PROJ.4 or EPSG:nnnn 103 string, 104 105 --crs 'EPSG:4326' 106 107 or a JSON text-encoded PROJ.4 object. 108 109 --crs '{"proj": "utm", "zone": 18, ...}' 110 111 Transforms are JSON-encoded Affine objects like: 112 113 --transform '[300.038, 0.0, 101985.0, 0.0, -300.042, 2826915.0]' 114 115 Prior to Rasterio 1.0 GDAL geotransforms were supported for --transform, 116 but are no longer supported. 117 118 Metadata items may also be read from an existing dataset using a 119 combination of the --like option with at least one of --all, 120 `--crs like`, `--nodata like`, and `--transform like`. 121 122 rio edit-info example.tif --like template.tif --all 123 124 To get just the transform from the template: 125 126 rio edit-info example.tif --like template.tif --transform like 127 128 """ 129 import numpy as np 130 131 def in_dtype_range(value, dtype): 132 infos = {'c': np.finfo, 'f': np.finfo, 'i': np.iinfo, 133 'u': np.iinfo} 134 rng = infos[np.dtype(dtype).kind](dtype) 135 return rng.min <= value <= rng.max 136 137 with ctx.obj['env'], rasterio.open(input, 'r+') as dst: 138 139 if allmd: 140 nodata = allmd['nodata'] 141 crs = allmd['crs'] 142 transform = allmd['transform'] 143 tags = allmd['tags'] 144 145 if unset_nodata and nodata is not options.IgnoreOption: 146 raise click.BadParameter( 147 "--unset-nodata and --nodata cannot be used together.") 148 149 if unset_nodata: 150 # Setting nodata to None will raise NotImplementedError 151 # if GDALDeleteRasterNoDataValue() isn't present in the 152 # GDAL library. 153 try: 154 dst.nodata = None 155 except NotImplementedError as exc: # pragma: no cover 156 raise click.ClickException(str(exc)) 157 158 elif nodata is not options.IgnoreOption: 159 dtype = dst.dtypes[0] 160 if nodata is not None and not in_dtype_range(nodata, dtype): 161 raise click.BadParameter( 162 "outside the range of the file's " 163 "data type (%s)." 
% dtype, 164 param=nodata, param_hint='nodata') 165 dst.nodata = nodata 166 167 if crs: 168 dst.crs = crs 169 170 if transform: 171 dst.transform = transform 172 173 if tags: 174 dst.update_tags(**tags) 175 176 if units: 177 dst.set_units(bidx, units) 178 179 if description: 180 dst.set_description(bidx, description) 181 [end of rasterio/rio/edit_info.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rasterio/rio/edit_info.py b/rasterio/rio/edit_info.py --- a/rasterio/rio/edit_info.py +++ b/rasterio/rio/edit_info.py @@ -2,12 +2,12 @@ import json +import warnings import click import rasterio import rasterio.crs -from rasterio.compat import string_types from rasterio.crs import CRS from rasterio.errors import CRSError from rasterio.rio import options @@ -82,6 +82,8 @@ help="Unset the dataset's nodata value.") @click.option('--crs', callback=crs_handler, default=None, help="New coordinate reference system") [email protected]('--unset-crs', default=False, is_flag=True, + help="Unset the dataset's CRS value.") @click.option('--transform', callback=transform_handler, help="New affine transform matrix") @click.option('--units', help="Edit units of a band (requires --bidx)") @@ -94,8 +96,8 @@ help="Copy all metadata items from the template file.") @options.like_opt @click.pass_context -def edit(ctx, input, bidx, nodata, unset_nodata, crs, transform, units, - description, tags, allmd, like): +def edit(ctx, input, bidx, nodata, unset_nodata, crs, unset_crs, transform, + units, description, tags, allmd, like): """Edit a dataset's metadata: coordinate reference system, affine transformation matrix, nodata value, and tags. @@ -146,6 +148,10 @@ raise click.BadParameter( "--unset-nodata and --nodata cannot be used together.") + if unset_crs and crs: + raise click.BadParameter( + "--unset-crs and --crs cannot be used together.") + if unset_nodata: # Setting nodata to None will raise NotImplementedError # if GDALDeleteRasterNoDataValue() isn't present in the @@ -164,7 +170,9 @@ param=nodata, param_hint='nodata') dst.nodata = nodata - if crs: + if unset_crs: + dst.crs = CRS() + elif crs: dst.crs = crs if transform: @@ -178,3 +186,11 @@ if description: dst.set_description(bidx, description) + + # Post check - ensure that crs was unset properly + if unset_crs: + with ctx.obj['env'], rasterio.open(input, 'r') as src: + if dict(src.crs) != {}: + warnings.warn( + 'CRS was not unset. Availability of his functionality ' + 'differs depending on GDAL version and driver')
{"golden_diff": "diff --git a/rasterio/rio/edit_info.py b/rasterio/rio/edit_info.py\n--- a/rasterio/rio/edit_info.py\n+++ b/rasterio/rio/edit_info.py\n@@ -2,12 +2,12 @@\n \n \n import json\n+import warnings\n \n import click\n \n import rasterio\n import rasterio.crs\n-from rasterio.compat import string_types\n from rasterio.crs import CRS\n from rasterio.errors import CRSError\n from rasterio.rio import options\n@@ -82,6 +82,8 @@\n help=\"Unset the dataset's nodata value.\")\n @click.option('--crs', callback=crs_handler, default=None,\n help=\"New coordinate reference system\")\[email protected]('--unset-crs', default=False, is_flag=True,\n+ help=\"Unset the dataset's CRS value.\")\n @click.option('--transform', callback=transform_handler,\n help=\"New affine transform matrix\")\n @click.option('--units', help=\"Edit units of a band (requires --bidx)\")\n@@ -94,8 +96,8 @@\n help=\"Copy all metadata items from the template file.\")\n @options.like_opt\n @click.pass_context\n-def edit(ctx, input, bidx, nodata, unset_nodata, crs, transform, units,\n- description, tags, allmd, like):\n+def edit(ctx, input, bidx, nodata, unset_nodata, crs, unset_crs, transform,\n+ units, description, tags, allmd, like):\n \"\"\"Edit a dataset's metadata: coordinate reference system, affine\n transformation matrix, nodata value, and tags.\n \n@@ -146,6 +148,10 @@\n raise click.BadParameter(\n \"--unset-nodata and --nodata cannot be used together.\")\n \n+ if unset_crs and crs:\n+ raise click.BadParameter(\n+ \"--unset-crs and --crs cannot be used together.\")\n+\n if unset_nodata:\n # Setting nodata to None will raise NotImplementedError\n # if GDALDeleteRasterNoDataValue() isn't present in the\n@@ -164,7 +170,9 @@\n param=nodata, param_hint='nodata')\n dst.nodata = nodata\n \n- if crs:\n+ if unset_crs:\n+ dst.crs = CRS()\n+ elif crs:\n dst.crs = crs\n \n if transform:\n@@ -178,3 +186,11 @@\n \n if description:\n dst.set_description(bidx, description)\n+\n+ # Post check - ensure that crs was unset properly\n+ if unset_crs:\n+ with ctx.obj['env'], rasterio.open(input, 'r') as src:\n+ if dict(src.crs) != {}:\n+ warnings.warn(\n+ 'CRS was not unset. 
Availability of his functionality '\n+ 'differs depending on GDAL version and driver')\n", "issue": "Can't unset crs metadata values\nUsing the cli:\r\n```bash\r\n# crs is defined\r\n$ rio info tests/data/alpha.tif | jq '.crs'\r\n\"EPSG:3857\"\r\n\r\n# attempt to unset crs\r\n$ rio edit-info tests/data/alpha.tif --crs \"\"\r\n\r\n# but\r\n$ rio info tests/data/alpha.tif | jq '.crs'\r\n\"EPSG:3857\"\r\n```\r\n\r\nPer @perrygeo - the python version of this is simple:\r\n```python\r\nimport rasterio\r\nfrom rasterio.crs import CRS\r\n\r\nraster = 'test/prep_test/de_hamburg_custom_srs/fixtures/dop20c_32587_5922.tif'\r\n\r\nwith rasterio.open(raster, 'r+') as src:\r\n src.crs = CRS()\r\n```\r\n\r\nReminds me of https://github.com/mapbox/rasterio/issues/328.\n", "before_files": [{"content": "\"\"\"Fetch and edit raster dataset metadata from the command line.\"\"\"\n\n\nimport json\n\nimport click\n\nimport rasterio\nimport rasterio.crs\nfrom rasterio.compat import string_types\nfrom rasterio.crs import CRS\nfrom rasterio.errors import CRSError\nfrom rasterio.rio import options\nfrom rasterio.transform import guard_transform\n\n\n# Handlers for info module options.\n\ndef all_handler(ctx, param, value):\n \"\"\"Get tags from a template file or command line.\"\"\"\n if ctx.obj and ctx.obj.get('like') and value is not None:\n ctx.obj['all_like'] = value\n value = ctx.obj.get('like')\n return value\n\n\ndef crs_handler(ctx, param, value):\n \"\"\"Get crs value from a template file or command line.\"\"\"\n retval = options.from_like_context(ctx, param, value)\n if retval is None and value:\n try:\n retval = json.loads(value)\n except ValueError:\n retval = value\n try:\n if isinstance(retval, dict):\n retval = CRS(retval)\n else:\n retval = CRS.from_string(retval)\n except CRSError:\n raise click.BadParameter(\n \"'%s' is not a recognized CRS.\" % retval,\n param=param, param_hint='crs')\n return retval\n\n\ndef tags_handler(ctx, param, value):\n \"\"\"Get tags from a template file or command line.\"\"\"\n retval = options.from_like_context(ctx, param, value)\n if retval is None and value:\n try:\n retval = dict(p.split('=') for p in value)\n except:\n raise click.BadParameter(\n \"'%s' contains a malformed tag.\" % value,\n param=param, param_hint='transform')\n return retval\n\n\ndef transform_handler(ctx, param, value):\n \"\"\"Get transform value from a template file or command line.\"\"\"\n retval = options.from_like_context(ctx, param, value)\n if retval is None and value:\n try:\n value = json.loads(value)\n except ValueError:\n pass\n try:\n retval = guard_transform(value)\n except:\n raise click.BadParameter(\n \"'%s' is not recognized as an Affine array.\" % value,\n param=param, param_hint='transform')\n return retval\n\n\[email protected]('edit-info', short_help=\"Edit dataset metadata.\")\[email protected]_in_arg\[email protected]_opt\[email protected]_nodata_opt\[email protected]('--unset-nodata', default=False, is_flag=True,\n help=\"Unset the dataset's nodata value.\")\[email protected]('--crs', callback=crs_handler, default=None,\n help=\"New coordinate reference system\")\[email protected]('--transform', callback=transform_handler,\n help=\"New affine transform matrix\")\[email protected]('--units', help=\"Edit units of a band (requires --bidx)\")\[email protected]('--description',\n help=\"Edit description of a band (requires --bidx)\")\[email protected]('--tag', 'tags', callback=tags_handler, multiple=True,\n metavar='KEY=VAL', help=\"New tag.\")\[email protected]('--all', 
'allmd', callback=all_handler, flag_value='like',\n is_eager=True, default=False,\n help=\"Copy all metadata items from the template file.\")\[email protected]_opt\[email protected]_context\ndef edit(ctx, input, bidx, nodata, unset_nodata, crs, transform, units,\n description, tags, allmd, like):\n \"\"\"Edit a dataset's metadata: coordinate reference system, affine\n transformation matrix, nodata value, and tags.\n\n The coordinate reference system may be either a PROJ.4 or EPSG:nnnn\n string,\n\n --crs 'EPSG:4326'\n\n or a JSON text-encoded PROJ.4 object.\n\n --crs '{\"proj\": \"utm\", \"zone\": 18, ...}'\n\n Transforms are JSON-encoded Affine objects like:\n\n --transform '[300.038, 0.0, 101985.0, 0.0, -300.042, 2826915.0]'\n\n Prior to Rasterio 1.0 GDAL geotransforms were supported for --transform,\n but are no longer supported.\n\n Metadata items may also be read from an existing dataset using a\n combination of the --like option with at least one of --all,\n `--crs like`, `--nodata like`, and `--transform like`.\n\n rio edit-info example.tif --like template.tif --all\n\n To get just the transform from the template:\n\n rio edit-info example.tif --like template.tif --transform like\n\n \"\"\"\n import numpy as np\n\n def in_dtype_range(value, dtype):\n infos = {'c': np.finfo, 'f': np.finfo, 'i': np.iinfo,\n 'u': np.iinfo}\n rng = infos[np.dtype(dtype).kind](dtype)\n return rng.min <= value <= rng.max\n\n with ctx.obj['env'], rasterio.open(input, 'r+') as dst:\n\n if allmd:\n nodata = allmd['nodata']\n crs = allmd['crs']\n transform = allmd['transform']\n tags = allmd['tags']\n\n if unset_nodata and nodata is not options.IgnoreOption:\n raise click.BadParameter(\n \"--unset-nodata and --nodata cannot be used together.\")\n\n if unset_nodata:\n # Setting nodata to None will raise NotImplementedError\n # if GDALDeleteRasterNoDataValue() isn't present in the\n # GDAL library.\n try:\n dst.nodata = None\n except NotImplementedError as exc: # pragma: no cover\n raise click.ClickException(str(exc))\n\n elif nodata is not options.IgnoreOption:\n dtype = dst.dtypes[0]\n if nodata is not None and not in_dtype_range(nodata, dtype):\n raise click.BadParameter(\n \"outside the range of the file's \"\n \"data type (%s).\" % dtype,\n param=nodata, param_hint='nodata')\n dst.nodata = nodata\n\n if crs:\n dst.crs = crs\n\n if transform:\n dst.transform = transform\n\n if tags:\n dst.update_tags(**tags)\n\n if units:\n dst.set_units(bidx, units)\n\n if description:\n dst.set_description(bidx, description)\n", "path": "rasterio/rio/edit_info.py"}]}
2,578
644
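The Python workaround quoted in the issue above, written out as a small sketch; the file path is hypothetical, and whether the CRS is really cleared on disk depends on the GDAL version and driver, as the warning added in the diff indicates.

```python
import rasterio
from rasterio.crs import CRS

path = "example.tif"  # hypothetical GeoTIFF path

# Assign an empty CRS in r+ mode to unset the dataset's CRS.
with rasterio.open(path, "r+") as src:
    src.crs = CRS()
```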
gh_patches_debug_61134
rasdani/github-patches
git_diff
e2nIEE__pandapower-1293
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> networkx compatibility issue with python3.9 There's a networkx>=2.5 dependency to run pandapower under python3.9 as gcd has to be imported from math instead of fractions. </issue> <code> [start of setup.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics 4 # and Energy System Technology (IEE), Kassel. All rights reserved. 5 6 from setuptools import setup, find_packages 7 import re 8 9 with open('README.rst', 'rb') as f: 10 install = f.read().decode('utf-8') 11 12 with open('CHANGELOG.rst', 'rb') as f: 13 changelog = f.read().decode('utf-8') 14 15 classifiers = [ 16 'Development Status :: 5 - Production/Stable', 17 'Environment :: Console', 18 'Intended Audience :: Developers', 19 'Intended Audience :: Education', 20 'Intended Audience :: Science/Research', 21 'License :: OSI Approved :: BSD License', 22 'Natural Language :: English', 23 'Operating System :: OS Independent', 24 'Programming Language :: Python', 25 'Programming Language :: Python :: 3'] 26 27 with open('.github/workflows/github_test_action.yml', 'rb') as f: 28 lines = f.read().decode('utf-8') 29 versions = set(re.findall('3.[0-9]', lines)) 30 for version in versions: 31 classifiers.append('Programming Language :: Python :: 3.%s' % version[-1]) 32 33 long_description = '\n\n'.join((install, changelog)) 34 35 setup( 36 name='pandapower', 37 version='2.6.0', 38 author='Leon Thurner, Alexander Scheidler', 39 author_email='[email protected], [email protected]', 40 description='An easy to use open source tool for power system modeling, analysis and optimization with a high degree of automation.', 41 long_description=long_description, 42 long_description_content_type='text/x-rst', 43 url='http://www.pandapower.org', 44 license='BSD', 45 install_requires=["pandas>=0.17", 46 "networkx", 47 "scipy<=1.6.0", 48 "numpy>=0.11", 49 "packaging", 50 "xlsxwriter", 51 "xlrd", 52 "openpyxl", 53 "cryptography"], 54 extras_require={ 55 "docs": ["numpydoc", "sphinx", "sphinx_rtd_theme"], 56 "plotting": ["plotly", "matplotlib", "python-igraph"], 57 "test": ["pytest", "pytest-xdist"], 58 "performance": ["ortools"]}, 59 packages=find_packages(), 60 include_package_data=True, 61 classifiers=classifiers 62 ) 63 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -43,7 +43,7 @@ url='http://www.pandapower.org', license='BSD', install_requires=["pandas>=0.17", - "networkx", + "networkx>=2.5", "scipy<=1.6.0", "numpy>=0.11", "packaging",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -43,7 +43,7 @@\n url='http://www.pandapower.org',\n license='BSD',\n install_requires=[\"pandas>=0.17\",\n- \"networkx\",\n+ \"networkx>=2.5\",\n \"scipy<=1.6.0\",\n \"numpy>=0.11\",\n \"packaging\",\n", "issue": "networkx compatibility issue with python3.9\nThere's a networkx>=2.5 dependency to run pandapower under python3.9 as gcd has to be imported from math instead of fractions.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\nfrom setuptools import setup, find_packages\nimport re\n\nwith open('README.rst', 'rb') as f:\n install = f.read().decode('utf-8')\n\nwith open('CHANGELOG.rst', 'rb') as f:\n changelog = f.read().decode('utf-8')\n\nclassifiers = [\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3']\n\nwith open('.github/workflows/github_test_action.yml', 'rb') as f:\n lines = f.read().decode('utf-8')\n versions = set(re.findall('3.[0-9]', lines))\n for version in versions:\n classifiers.append('Programming Language :: Python :: 3.%s' % version[-1])\n\nlong_description = '\\n\\n'.join((install, changelog))\n\nsetup(\n name='pandapower',\n version='2.6.0',\n author='Leon Thurner, Alexander Scheidler',\n author_email='[email protected], [email protected]',\n description='An easy to use open source tool for power system modeling, analysis and optimization with a high degree of automation.',\n long_description=long_description,\n\tlong_description_content_type='text/x-rst',\n url='http://www.pandapower.org',\n license='BSD',\n install_requires=[\"pandas>=0.17\",\n \"networkx\",\n \"scipy<=1.6.0\",\n \"numpy>=0.11\",\n \"packaging\",\n \"xlsxwriter\",\n \"xlrd\",\n \"openpyxl\",\n \"cryptography\"],\n extras_require={\n \"docs\": [\"numpydoc\", \"sphinx\", \"sphinx_rtd_theme\"],\n \"plotting\": [\"plotly\", \"matplotlib\", \"python-igraph\"],\n \"test\": [\"pytest\", \"pytest-xdist\"],\n \"performance\": [\"ortools\"]},\n packages=find_packages(),\n include_package_data=True,\n classifiers=classifiers\n)\n", "path": "setup.py"}]}
1,252
102
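For context on the pin above: on Python 3.9, `gcd` is only available from `math`, since `fractions.gcd` was removed, and networkx releases older than 2.5 still imported it from `fractions`. A generic illustration of the import difference (not pandapower code):

```python
# Illustration only: prefer math.gcd; fractions.gcd no longer exists on 3.9+.
try:
    from math import gcd  # available since Python 3.5
except ImportError:  # very old interpreters only
    from fractions import gcd  # removed in Python 3.9

print(gcd(12, 18))  # prints 6
```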
gh_patches_debug_41893
rasdani/github-patches
git_diff
scikit-image__scikit-image-3932
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> remove unused dtype argument in IO plugins ## Description See #3918 * Add deprecation cycle to these functions. * Add a bullet in TODO.txt. Indicate #3918 as a way to clean the code in the future. </issue> <code> [start of skimage/io/_plugins/gdal_plugin.py] 1 __all__ = ['imread'] 2 3 try: 4 import osgeo.gdal as gdal 5 except ImportError: 6 raise ImportError("The GDAL Library could not be found. " 7 "Please refer to http://www.gdal.org/ " 8 "for further instructions.") 9 10 11 def imread(fname, dtype=None): 12 """Load an image from file. 13 14 """ 15 ds = gdal.Open(fname) 16 17 return ds.ReadAsArray().astype(dtype) 18 [end of skimage/io/_plugins/gdal_plugin.py] [start of skimage/io/_plugins/fits_plugin.py] 1 __all__ = ['imread', 'imread_collection'] 2 3 import skimage.io as io 4 5 try: 6 from astropy.io import fits as pyfits 7 except ImportError: 8 try: 9 import pyfits 10 except ImportError: 11 raise ImportError( 12 "PyFITS could not be found. Please refer to\n" 13 "http://www.stsci.edu/resources/software_hardware/pyfits\n" 14 "for further instructions.") 15 16 17 def imread(fname, dtype=None): 18 """Load an image from a FITS file. 19 20 Parameters 21 ---------- 22 fname : string 23 Image file name, e.g. ``test.fits``. 24 dtype : dtype, optional 25 For FITS, this argument is ignored because Stefan is planning on 26 removing the dtype argument from imread anyway. 27 28 Returns 29 ------- 30 img_array : ndarray 31 Unlike plugins such as PIL, where different color bands/channels are 32 stored in the third dimension, FITS images are greyscale-only and can 33 be N-dimensional, so an array of the native FITS dimensionality is 34 returned, without color channels. 35 36 Currently if no image is found in the file, None will be returned 37 38 Notes 39 ----- 40 41 Currently FITS ``imread()`` always returns the first image extension when 42 given a Multi-Extension FITS file; use ``imread_collection()`` (which does 43 lazy loading) to get all the extensions at once. 44 45 """ 46 47 hdulist = pyfits.open(fname) 48 49 # Iterate over FITS image extensions, ignoring any other extension types 50 # such as binary tables, and get the first image data array: 51 img_array = None 52 for hdu in hdulist: 53 if isinstance(hdu, pyfits.ImageHDU) or \ 54 isinstance(hdu, pyfits.PrimaryHDU): 55 if hdu.data is not None: 56 img_array = hdu.data 57 break 58 hdulist.close() 59 60 return img_array 61 62 63 def imread_collection(load_pattern, conserve_memory=True): 64 """Load a collection of images from one or more FITS files 65 66 Parameters 67 ---------- 68 load_pattern : str or list 69 List of extensions to load. Filename globbing is currently 70 unsupported. 71 converve_memory : bool 72 If True, never keep more than one in memory at a specific 73 time. Otherwise, images will be cached once they are loaded. 74 75 Returns 76 ------- 77 78 ic : ImageCollection 79 Collection of images. 
80 81 """ 82 83 intype = type(load_pattern) 84 if intype is not list and intype is not str: 85 raise TypeError("Input must be a filename or list of filenames") 86 87 # Ensure we have a list, otherwise we'll end up iterating over the string: 88 if intype is not list: 89 load_pattern = [load_pattern] 90 91 # Generate a list of filename/extension pairs by opening the list of 92 # files and finding the image extensions in each one: 93 ext_list = [] 94 for filename in load_pattern: 95 hdulist = pyfits.open(filename) 96 for n, hdu in zip(range(len(hdulist)), hdulist): 97 if isinstance(hdu, pyfits.ImageHDU) or \ 98 isinstance(hdu, pyfits.PrimaryHDU): 99 # Ignore (primary) header units with no data (use '.size' 100 # rather than '.data' to avoid actually loading the image): 101 try: 102 data_size = hdu.size() 103 except TypeError: # (size changed to int in PyFITS 3.1) 104 data_size = hdu.size 105 if data_size > 0: 106 ext_list.append((filename, n)) 107 hdulist.close() 108 109 return io.ImageCollection(ext_list, load_func=FITSFactory, 110 conserve_memory=conserve_memory) 111 112 113 def FITSFactory(image_ext): 114 """Load an image extension from a FITS file and return a NumPy array 115 116 Parameters 117 ---------- 118 119 image_ext : tuple 120 FITS extension to load, in the format ``(filename, ext_num)``. 121 The FITS ``(extname, extver)`` format is unsupported, since this 122 function is not called directly by the user and 123 ``imread_collection()`` does the work of figuring out which 124 extensions need loading. 125 126 """ 127 128 # Expect a length-2 tuple with a filename as the first element: 129 if not isinstance(image_ext, tuple): 130 raise TypeError("Expected a tuple") 131 132 if len(image_ext) != 2: 133 raise ValueError("Expected a tuple of length 2") 134 135 filename = image_ext[0] 136 extnum = image_ext[1] 137 138 if type(filename) is not str or type(extnum) is not int: 139 raise ValueError("Expected a (filename, extension) tuple") 140 141 hdulist = pyfits.open(filename) 142 143 data = hdulist[extnum].data 144 145 hdulist.close() 146 147 if data is None: 148 raise RuntimeError( 149 "Extension %d of %s has no data" % (extnum, filename)) 150 151 return data 152 [end of skimage/io/_plugins/fits_plugin.py] [start of skimage/io/_plugins/tifffile_plugin.py] 1 try: 2 from tifffile import TiffFile, imsave, parse_kwargs 3 except ImportError: 4 from ...external.tifffile import TiffFile, imsave, parse_kwargs 5 6 7 def imread(fname, dtype=None, **kwargs): 8 """Load a tiff image from file. 9 10 Parameters 11 ---------- 12 fname : str or file 13 File name or file-like-object. 14 dtype : numpy dtype object or string specifier 15 Specifies data type of array elements (Not currently used). 16 kwargs : keyword pairs, optional 17 Additional keyword arguments to pass through (see ``tifffile``'s 18 ``imread`` function). 19 20 Notes 21 ----- 22 Provided by Christophe Golhke's tifffile.py [1]_, and supports many 23 advanced image types including multi-page and floating point. 24 25 References 26 ---------- 27 .. 
[1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py 28 29 """ 30 31 if 'img_num' in kwargs: 32 kwargs['key'] = kwargs.pop('img_num') 33 34 # parse_kwargs will extract keyword arguments intended for the TiffFile 35 # class and remove them from the kwargs dictionary in-place 36 tiff_keys = ['multifile', 'multifile_close', 'pages', 'fastij', 'is_ome'] 37 kwargs_tiff = parse_kwargs(kwargs, *tiff_keys) 38 39 # read and return tiff as numpy array 40 with TiffFile(fname, **kwargs_tiff) as tif: 41 return tif.asarray(**kwargs) 42 [end of skimage/io/_plugins/tifffile_plugin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/skimage/io/_plugins/fits_plugin.py b/skimage/io/_plugins/fits_plugin.py --- a/skimage/io/_plugins/fits_plugin.py +++ b/skimage/io/_plugins/fits_plugin.py @@ -1,6 +1,7 @@ __all__ = ['imread', 'imread_collection'] import skimage.io as io +from warnings import warn try: from astropy.io import fits as pyfits @@ -22,8 +23,8 @@ fname : string Image file name, e.g. ``test.fits``. dtype : dtype, optional - For FITS, this argument is ignored because Stefan is planning on - removing the dtype argument from imread anyway. + Was always silently ignored. + Will be removed from version 0.17. Returns ------- @@ -43,6 +44,11 @@ lazy loading) to get all the extensions at once. """ + if 'dtype' is not None: + warn('The dtype argument was always silently ignored. It will be ' + 'removed from scikit-image version 0.17. To avoid this ' + 'warning, do not specify it in your function call.', + UserWarning, stacklevel=2) hdulist = pyfits.open(fname) diff --git a/skimage/io/_plugins/gdal_plugin.py b/skimage/io/_plugins/gdal_plugin.py --- a/skimage/io/_plugins/gdal_plugin.py +++ b/skimage/io/_plugins/gdal_plugin.py @@ -1,5 +1,7 @@ __all__ = ['imread'] +from warnings import warn + try: import osgeo.gdal as gdal except ImportError: @@ -12,6 +14,12 @@ """Load an image from file. """ + if 'dtype' is not None: + warn('The dtype argument was always silently ignored. It will be ' + 'removed from scikit-image version 0.17. To avoid this ' + 'warning, do not specify it in your function call.', + UserWarning, stacklevel=2) + ds = gdal.Open(fname) return ds.ReadAsArray().astype(dtype) diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py --- a/skimage/io/_plugins/tifffile_plugin.py +++ b/skimage/io/_plugins/tifffile_plugin.py @@ -1,3 +1,5 @@ +from warnings import warn + try: from tifffile import TiffFile, imsave, parse_kwargs except ImportError: @@ -12,7 +14,8 @@ fname : str or file File name or file-like-object. dtype : numpy dtype object or string specifier - Specifies data type of array elements (Not currently used). + Specifies data type of array elements. + Will be removed from version 0.17. kwargs : keyword pairs, optional Additional keyword arguments to pass through (see ``tifffile``'s ``imread`` function). @@ -27,11 +30,16 @@ .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py """ + if 'dtype' is not None: + warn('The dtype argument was always silently ignored. It will be ' + 'removed from scikit-image version 0.17. To avoid this ' + 'warning, do not specify it in your function call.', + UserWarning, stacklevel=2) if 'img_num' in kwargs: kwargs['key'] = kwargs.pop('img_num') - # parse_kwargs will extract keyword arguments intended for the TiffFile + # parse_kwargs will extract keyword arguments intended for the TiffFile # class and remove them from the kwargs dictionary in-place tiff_keys = ['multifile', 'multifile_close', 'pages', 'fastij', 'is_ome'] kwargs_tiff = parse_kwargs(kwargs, *tiff_keys)
{"golden_diff": "diff --git a/skimage/io/_plugins/fits_plugin.py b/skimage/io/_plugins/fits_plugin.py\n--- a/skimage/io/_plugins/fits_plugin.py\n+++ b/skimage/io/_plugins/fits_plugin.py\n@@ -1,6 +1,7 @@\n __all__ = ['imread', 'imread_collection']\n \n import skimage.io as io\n+from warnings import warn\n \n try:\n from astropy.io import fits as pyfits\n@@ -22,8 +23,8 @@\n fname : string\n Image file name, e.g. ``test.fits``.\n dtype : dtype, optional\n- For FITS, this argument is ignored because Stefan is planning on\n- removing the dtype argument from imread anyway.\n+ Was always silently ignored.\n+ Will be removed from version 0.17.\n \n Returns\n -------\n@@ -43,6 +44,11 @@\n lazy loading) to get all the extensions at once.\n \n \"\"\"\n+ if 'dtype' is not None:\n+ warn('The dtype argument was always silently ignored. It will be '\n+ 'removed from scikit-image version 0.17. To avoid this '\n+ 'warning, do not specify it in your function call.',\n+ UserWarning, stacklevel=2)\n \n hdulist = pyfits.open(fname)\n \ndiff --git a/skimage/io/_plugins/gdal_plugin.py b/skimage/io/_plugins/gdal_plugin.py\n--- a/skimage/io/_plugins/gdal_plugin.py\n+++ b/skimage/io/_plugins/gdal_plugin.py\n@@ -1,5 +1,7 @@\n __all__ = ['imread']\n \n+from warnings import warn\n+\n try:\n import osgeo.gdal as gdal\n except ImportError:\n@@ -12,6 +14,12 @@\n \"\"\"Load an image from file.\n \n \"\"\"\n+ if 'dtype' is not None:\n+ warn('The dtype argument was always silently ignored. It will be '\n+ 'removed from scikit-image version 0.17. To avoid this '\n+ 'warning, do not specify it in your function call.',\n+ UserWarning, stacklevel=2)\n+\n ds = gdal.Open(fname)\n \n return ds.ReadAsArray().astype(dtype)\ndiff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py\n--- a/skimage/io/_plugins/tifffile_plugin.py\n+++ b/skimage/io/_plugins/tifffile_plugin.py\n@@ -1,3 +1,5 @@\n+from warnings import warn\n+\n try:\n from tifffile import TiffFile, imsave, parse_kwargs\n except ImportError:\n@@ -12,7 +14,8 @@\n fname : str or file\n File name or file-like-object.\n dtype : numpy dtype object or string specifier\n- Specifies data type of array elements (Not currently used).\n+ Specifies data type of array elements.\n+ Will be removed from version 0.17.\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n@@ -27,11 +30,16 @@\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py\n \n \"\"\"\n+ if 'dtype' is not None:\n+ warn('The dtype argument was always silently ignored. It will be '\n+ 'removed from scikit-image version 0.17. To avoid this '\n+ 'warning, do not specify it in your function call.',\n+ UserWarning, stacklevel=2)\n \n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n \n- # parse_kwargs will extract keyword arguments intended for the TiffFile \n+ # parse_kwargs will extract keyword arguments intended for the TiffFile\n # class and remove them from the kwargs dictionary in-place\n tiff_keys = ['multifile', 'multifile_close', 'pages', 'fastij', 'is_ome']\n kwargs_tiff = parse_kwargs(kwargs, *tiff_keys)\n", "issue": " remove unused dtype argument in IO plugins\n## Description\r\n\r\nSee #3918\r\n\r\n* Add deprecation cycle to these functions.\r\n* Add a bullet in TODO.txt. 
Indicate #3918 as a way to clean the code in the future.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "__all__ = ['imread']\n\ntry:\n import osgeo.gdal as gdal\nexcept ImportError:\n raise ImportError(\"The GDAL Library could not be found. \"\n \"Please refer to http://www.gdal.org/ \"\n \"for further instructions.\")\n\n\ndef imread(fname, dtype=None):\n \"\"\"Load an image from file.\n\n \"\"\"\n ds = gdal.Open(fname)\n\n return ds.ReadAsArray().astype(dtype)\n", "path": "skimage/io/_plugins/gdal_plugin.py"}, {"content": "__all__ = ['imread', 'imread_collection']\n\nimport skimage.io as io\n\ntry:\n from astropy.io import fits as pyfits\nexcept ImportError:\n try:\n import pyfits\n except ImportError:\n raise ImportError(\n \"PyFITS could not be found. Please refer to\\n\"\n \"http://www.stsci.edu/resources/software_hardware/pyfits\\n\"\n \"for further instructions.\")\n\n\ndef imread(fname, dtype=None):\n \"\"\"Load an image from a FITS file.\n\n Parameters\n ----------\n fname : string\n Image file name, e.g. ``test.fits``.\n dtype : dtype, optional\n For FITS, this argument is ignored because Stefan is planning on\n removing the dtype argument from imread anyway.\n\n Returns\n -------\n img_array : ndarray\n Unlike plugins such as PIL, where different color bands/channels are\n stored in the third dimension, FITS images are greyscale-only and can\n be N-dimensional, so an array of the native FITS dimensionality is\n returned, without color channels.\n\n Currently if no image is found in the file, None will be returned\n\n Notes\n -----\n\n Currently FITS ``imread()`` always returns the first image extension when\n given a Multi-Extension FITS file; use ``imread_collection()`` (which does\n lazy loading) to get all the extensions at once.\n\n \"\"\"\n\n hdulist = pyfits.open(fname)\n\n # Iterate over FITS image extensions, ignoring any other extension types\n # such as binary tables, and get the first image data array:\n img_array = None\n for hdu in hdulist:\n if isinstance(hdu, pyfits.ImageHDU) or \\\n isinstance(hdu, pyfits.PrimaryHDU):\n if hdu.data is not None:\n img_array = hdu.data\n break\n hdulist.close()\n\n return img_array\n\n\ndef imread_collection(load_pattern, conserve_memory=True):\n \"\"\"Load a collection of images from one or more FITS files\n\n Parameters\n ----------\n load_pattern : str or list\n List of extensions to load. Filename globbing is currently\n unsupported.\n converve_memory : bool\n If True, never keep more than one in memory at a specific\n time. 
Otherwise, images will be cached once they are loaded.\n\n Returns\n -------\n\n ic : ImageCollection\n Collection of images.\n\n \"\"\"\n\n intype = type(load_pattern)\n if intype is not list and intype is not str:\n raise TypeError(\"Input must be a filename or list of filenames\")\n\n # Ensure we have a list, otherwise we'll end up iterating over the string:\n if intype is not list:\n load_pattern = [load_pattern]\n\n # Generate a list of filename/extension pairs by opening the list of\n # files and finding the image extensions in each one:\n ext_list = []\n for filename in load_pattern:\n hdulist = pyfits.open(filename)\n for n, hdu in zip(range(len(hdulist)), hdulist):\n if isinstance(hdu, pyfits.ImageHDU) or \\\n isinstance(hdu, pyfits.PrimaryHDU):\n # Ignore (primary) header units with no data (use '.size'\n # rather than '.data' to avoid actually loading the image):\n try:\n data_size = hdu.size()\n except TypeError: # (size changed to int in PyFITS 3.1)\n data_size = hdu.size\n if data_size > 0:\n ext_list.append((filename, n))\n hdulist.close()\n\n return io.ImageCollection(ext_list, load_func=FITSFactory,\n conserve_memory=conserve_memory)\n\n\ndef FITSFactory(image_ext):\n \"\"\"Load an image extension from a FITS file and return a NumPy array\n\n Parameters\n ----------\n\n image_ext : tuple\n FITS extension to load, in the format ``(filename, ext_num)``.\n The FITS ``(extname, extver)`` format is unsupported, since this\n function is not called directly by the user and\n ``imread_collection()`` does the work of figuring out which\n extensions need loading.\n\n \"\"\"\n\n # Expect a length-2 tuple with a filename as the first element:\n if not isinstance(image_ext, tuple):\n raise TypeError(\"Expected a tuple\")\n\n if len(image_ext) != 2:\n raise ValueError(\"Expected a tuple of length 2\")\n\n filename = image_ext[0]\n extnum = image_ext[1]\n\n if type(filename) is not str or type(extnum) is not int:\n raise ValueError(\"Expected a (filename, extension) tuple\")\n\n hdulist = pyfits.open(filename)\n\n data = hdulist[extnum].data\n\n hdulist.close()\n\n if data is None:\n raise RuntimeError(\n \"Extension %d of %s has no data\" % (extnum, filename))\n\n return data\n", "path": "skimage/io/_plugins/fits_plugin.py"}, {"content": "try:\n from tifffile import TiffFile, imsave, parse_kwargs\nexcept ImportError:\n from ...external.tifffile import TiffFile, imsave, parse_kwargs\n\n\ndef imread(fname, dtype=None, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n dtype : numpy dtype object or string specifier\n Specifies data type of array elements (Not currently used).\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by Christophe Golhke's tifffile.py [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. 
[1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py\n\n \"\"\"\n\n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n\n # parse_kwargs will extract keyword arguments intended for the TiffFile \n # class and remove them from the kwargs dictionary in-place\n tiff_keys = ['multifile', 'multifile_close', 'pages', 'fastij', 'is_ome']\n kwargs_tiff = parse_kwargs(kwargs, *tiff_keys)\n\n # read and return tiff as numpy array\n with TiffFile(fname, **kwargs_tiff) as tif:\n return tif.asarray(**kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py"}]}
2,624
926
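A generic, hypothetical sketch of the deprecation shim introduced in the diff above; the loader body is a placeholder, and the check is written against the `dtype` variable itself rather than the string literal `'dtype'`.

```python
from warnings import warn

def imread(fname, dtype=None):
    # Warn when the ignored, soon-to-be-removed argument is supplied.
    if dtype is not None:
        warn("the 'dtype' argument is ignored and will be removed in a "
             "future release; do not pass it.",
             UserWarning, stacklevel=2)
    with open(fname, "rb") as fh:  # placeholder loader for this sketch
        return fh.read()

# imread("image.tif", dtype="uint8") would emit the UserWarning.
```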
gh_patches_debug_719
rasdani/github-patches
git_diff
mne-tools__mne-bids-pipeline-680
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Doc deployment step failing The latest CI run failed to execute documentation deployment: https://app.circleci.com/pipelines/github/mne-tools/mne-bids-pipeline/3557/workflows/3458e5cc-c471-4664-8d0a-b0cc4961f9eb/jobs/41986/parallel-runs/0/steps/0-107 ```shell #!/bin/bash -eo pipefail ./.circleci/setup_bash.sh CIRCLE_JOB=deploy_docs COMMIT_MESSAGE=68c63d6878992fb7c298f24420f1d349c6811079 MAINT: Use mike for doc deployment (#676) COMMIT_MESSAGE_ESCAPED=68c63d6878992fb7c298f24420f1d349c6811079 MAINT: Use mike for doc deployment (#676) CIRCLE_REQUESTED_JOB= Running job deploy_docs for main branch ./.circleci/setup_bash.sh: line 35: sudo: command not found Exited with code exit status 127 CircleCI received exit code 127 ``` </issue> <code> [start of docs/source/features/gen_steps.py] 1 #!/bin/env python 2 """Generate steps.md.""" 3 4 import importlib 5 from pathlib import Path 6 from mne_bids_pipeline._config_utils import _get_step_modules 7 8 pre = """\ 9 # Processing steps 10 11 The following table provides a concise summary of each step in the Study 12 Template. All steps exist in the `steps`/ directory. 13 """ 14 15 step_modules = _get_step_modules() 16 17 # Construct the lines of steps.md 18 lines = [pre] 19 for di, (dir_, modules) in enumerate(step_modules.items(), 1): 20 if dir_ == 'all': 21 continue # this is an alias 22 dir_module = importlib.import_module(f'mne_bids_pipeline.steps.{dir_}') 23 dir_header = dir_module.__doc__.split('\n')[0].rstrip('.') 24 dir_body = dir_module.__doc__.split('\n', maxsplit=1) 25 if len(dir_body) > 1: 26 dir_body = dir_body[1].strip() 27 else: 28 dir_body = '' 29 lines.append(f'## {di}. {dir_header}\n') 30 if dir_body: 31 lines.append(f'{dir_body}\n') 32 lines.append('| Processing step | Description |') 33 lines.append('|:----------------|:------------|') 34 # the "all" option 35 dir_name, step_title = dir_, f'Run all {dir_header.lower()} steps.' 36 lines.append(f'`{dir_name}` | {step_title} |') 37 for module in modules: 38 step_name = f'{dir_name}/{Path(module.__file__).name}'[:-3] 39 step_title = module.__doc__.split('\n')[0] 40 lines.append(f'`{step_name}` | {step_title} |') 41 lines.append('') 42 with open(Path(__file__).parent / 'steps.md', 'w') as fid: 43 fid.write('\n'.join(lines)) 44 [end of docs/source/features/gen_steps.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/source/features/gen_steps.py b/docs/source/features/gen_steps.py --- a/docs/source/features/gen_steps.py +++ b/docs/source/features/gen_steps.py @@ -12,6 +12,7 @@ Template. All steps exist in the `steps`/ directory. """ +print('Generating steps …') step_modules = _get_step_modules() # Construct the lines of steps.md
{"golden_diff": "diff --git a/docs/source/features/gen_steps.py b/docs/source/features/gen_steps.py\n--- a/docs/source/features/gen_steps.py\n+++ b/docs/source/features/gen_steps.py\n@@ -12,6 +12,7 @@\n Template. All steps exist in the `steps`/ directory.\n \"\"\"\n \n+print('Generating steps \u2026')\n step_modules = _get_step_modules()\n \n # Construct the lines of steps.md\n", "issue": "Doc deployment step failing\nThe latest CI run failed to execute documentation deployment:\r\nhttps://app.circleci.com/pipelines/github/mne-tools/mne-bids-pipeline/3557/workflows/3458e5cc-c471-4664-8d0a-b0cc4961f9eb/jobs/41986/parallel-runs/0/steps/0-107\r\n\r\n```shell\r\n#!/bin/bash -eo pipefail\r\n./.circleci/setup_bash.sh\r\nCIRCLE_JOB=deploy_docs\r\nCOMMIT_MESSAGE=68c63d6878992fb7c298f24420f1d349c6811079 MAINT: Use mike for doc deployment (#676)\r\nCOMMIT_MESSAGE_ESCAPED=68c63d6878992fb7c298f24420f1d349c6811079 MAINT: Use mike for doc deployment (#676)\r\nCIRCLE_REQUESTED_JOB=\r\nRunning job deploy_docs for main branch\r\n./.circleci/setup_bash.sh: line 35: sudo: command not found\r\n\r\nExited with code exit status 127\r\nCircleCI received exit code 127\r\n```\n", "before_files": [{"content": "#!/bin/env python\n\"\"\"Generate steps.md.\"\"\"\n\nimport importlib\nfrom pathlib import Path\nfrom mne_bids_pipeline._config_utils import _get_step_modules\n\npre = \"\"\"\\\n# Processing steps\n\nThe following table provides a concise summary of each step in the Study\nTemplate. All steps exist in the `steps`/ directory.\n\"\"\"\n\nstep_modules = _get_step_modules()\n\n# Construct the lines of steps.md\nlines = [pre]\nfor di, (dir_, modules) in enumerate(step_modules.items(), 1):\n if dir_ == 'all':\n continue # this is an alias\n dir_module = importlib.import_module(f'mne_bids_pipeline.steps.{dir_}')\n dir_header = dir_module.__doc__.split('\\n')[0].rstrip('.')\n dir_body = dir_module.__doc__.split('\\n', maxsplit=1)\n if len(dir_body) > 1:\n dir_body = dir_body[1].strip()\n else:\n dir_body = ''\n lines.append(f'## {di}. {dir_header}\\n')\n if dir_body:\n lines.append(f'{dir_body}\\n')\n lines.append('| Processing step | Description |')\n lines.append('|:----------------|:------------|')\n # the \"all\" option\n dir_name, step_title = dir_, f'Run all {dir_header.lower()} steps.'\n lines.append(f'`{dir_name}` | {step_title} |')\n for module in modules:\n step_name = f'{dir_name}/{Path(module.__file__).name}'[:-3]\n step_title = module.__doc__.split('\\n')[0]\n lines.append(f'`{step_name}` | {step_title} |')\n lines.append('')\nwith open(Path(__file__).parent / 'steps.md', 'w') as fid:\n fid.write('\\n'.join(lines))\n", "path": "docs/source/features/gen_steps.py"}]}
1307
87
gh_patches_debug_27400
rasdani/github-patches
git_diff
pytorch__text-135
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ascii vs. utf-8 in torchtext/datasets/translation.py @nelson-liu: I incorrectly brought this up in pull #52, new issue here When trying to load splits for IWSLT (in french, german, etc...), the loading process would fail with an ascii encoding/decoding error: ``` .data/iwslt/de-en/IWSLT16.TED.dev2010.de-en.en.xml .data/iwslt/de-en/IWSLT16.TED.tst2013.de-en.de.xml Traceback (most recent call last): File "test.py", line 25, in <module> train, val, test = datasets.IWSLT.splits(exts=('.de', '.en'), fields=(DE, EN)) File "build/bdist.linux-x86_64/egg/torchtext/datasets/translation.py", line 116, in splits File "build/bdist.linux-x86_64/egg/torchtext/datasets/translation.py", line 136, in clean UnicodeEncodeError: 'ascii' codec can't encode character u'\xe4' in position 60: ordinal not in range(128) ``` These are my library versions: ``` numpy==1.13.3 regex==2017.9.23 spacy==1.9.0 torch==0.2.0.post4 torchtext==0.2.0b0 (just cloned a few minutes before error) torchvision==0.1.9 ``` Here is the code that I was using, from test/translation.py: ``` from torchtext import data from torchtext import datasets import re import spacy import sys spacy_de = spacy.load('de') spacy_en = spacy.load('en') url = re.compile('(<url>.*</url>)') def tokenize_de(text): return [tok.text for tok in spacy_de.tokenizer(url.sub('@URL@', text))] def tokenize_en(text): return [tok.text for tok in spacy_en.tokenizer(url.sub('@URL@', text))] # Testing IWSLT DE = data.Field(tokenize=tokenize_de) EN = data.Field(tokenize=tokenize_en) train, val, test = datasets.IWSLT.splits(exts=('.de', '.en'), fields=(DE, EN)) ``` The following fixed it for me, in torchtext/datasets/translation.py. Replace opens with io.opens specifying utf-8 for python2. It's worth noting that a friend with python3 did not have this problem. ``` 127 @staticmethod 128 def clean(path): 129 for f_xml in glob.iglob(os.path.join(path, '*.xml')): 130 print(f_xml) 131 f_txt = os.path.splitext(f_xml)[0] 132 import io 133 with io.open(f_txt, mode="w", encoding="utf-8") as fd_txt: <--- INSERT 134 #with open(f_txt, 'w') as fd_txt: <--- COMMENT 135 root = ET.parse(f_xml).getroot()[0] 136 for doc in root.findall('doc'): 137 for e in doc.findall('seg'): 138 fd_txt.write(e.text.strip() + '\n') 139 xml_tags = ['<url', '<keywords', '<talkid', '<description', 140 '<reviewer', '<translator', '<title', '<speaker'] 141 for f_orig in glob.iglob(os.path.join(path, 'train.tags*')): 142 print(f_orig) 143 f_txt = f_orig.replace('.tags', '') 144 with io.open(f_txt,mode='w',encoding='utf-8') as fd_txt, io.open(f_orig,mode='r',encoding='utf=8') as fd_orig: <--- INSERT 145 #with open(f_txt, 'w') as fd_txt, open(f_orig) as fd_orig: <--- COMMENT 146 for l in fd_orig: 147 if not any(tag in l for tag in xml_tags): 148 fd_txt.write(l.strip() + '\n') ``` @jekbradbury, you were correct in pull #52 that I didn't need the middle block explicitly encoding/decoding (not seen here) since the file is already open as utf-8. </issue> <code> [start of torchtext/datasets/translation.py] 1 import os 2 import xml.etree.ElementTree as ET 3 import glob 4 5 from .. import data 6 7 8 class TranslationDataset(data.Dataset): 9 """Defines a dataset for machine translation.""" 10 11 @staticmethod 12 def sort_key(ex): 13 return data.interleave_keys(len(ex.src), len(ex.trg)) 14 15 def __init__(self, path, exts, fields, **kwargs): 16 """Create a TranslationDataset given paths and fields. 
17 18 Arguments: 19 path: Common prefix of paths to the data files for both languages. 20 exts: A tuple containing the extension to path for each language. 21 fields: A tuple containing the fields that will be used for data 22 in each language. 23 Remaining keyword arguments: Passed to the constructor of 24 data.Dataset. 25 """ 26 if not isinstance(fields[0], (tuple, list)): 27 fields = [('src', fields[0]), ('trg', fields[1])] 28 29 src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts) 30 31 examples = [] 32 with open(src_path) as src_file, open(trg_path) as trg_file: 33 for src_line, trg_line in zip(src_file, trg_file): 34 src_line, trg_line = src_line.strip(), trg_line.strip() 35 if src_line != '' and trg_line != '': 36 examples.append(data.Example.fromlist( 37 [src_line, trg_line], fields)) 38 39 super(TranslationDataset, self).__init__(examples, fields, **kwargs) 40 41 42 class Multi30k(TranslationDataset, data.Dataset): 43 """Defines a dataset for the multi-modal WMT 2016 task""" 44 45 urls = ['http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz', 46 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz', 47 'https://staff.fnwi.uva.nl/d.elliott/wmt16/mmt16_task1_test.tgz'] 48 name = 'multi30k' 49 dirname = '' 50 51 @classmethod 52 def splits(cls, exts, fields, root='.data', 53 train='train', val='val', test='test', **kwargs): 54 """Create dataset objects for splits of the Multi30k dataset. 55 56 Arguments: 57 58 root: directory containing Multi30k data 59 exts: A tuple containing the extension to path for each language. 60 fields: A tuple containing the fields that will be used for data 61 in each language. 62 train: The prefix of the train data. Default: 'train'. 63 validation: The prefix of the validation data. Default: 'val'. 64 test: The prefix of the test data. Default: 'test'. 65 Remaining keyword arguments: Passed to the splits method of 66 Dataset. 67 """ 68 path = cls.download(root) 69 70 train_data = None if train is None else cls( 71 os.path.join(path, train), exts, fields, **kwargs) 72 val_data = None if val is None else cls( 73 os.path.join(path, val), exts, fields, **kwargs) 74 test_data = None if test is None else cls( 75 os.path.join(path, test), exts, fields, **kwargs) 76 return tuple(d for d in (train_data, val_data, test_data) 77 if d is not None) 78 79 80 class IWSLT(TranslationDataset, data.Dataset): 81 """Defines a dataset for the IWSLT 2016 task""" 82 83 base_url = 'https://wit3.fbk.eu/archive/2016-01//texts/{}/{}/{}.tgz' 84 name = 'iwslt' 85 base_dirname = '{}-{}' 86 87 @classmethod 88 def splits(cls, exts, fields, root='.data', 89 train='train', val='IWSLT16.TED.tst2013', 90 test='IWSLT16.TED.tst2014', **kwargs): 91 """Create dataset objects for splits of the IWSLT dataset. 92 93 Arguments: 94 95 root: directory containing Multi30k data 96 exts: A tuple containing the extension to path for each language. 97 fields: A tuple containing the fields that will be used for data 98 in each language. 99 train: The prefix of the train data. Default: 'train'. 100 validation: The prefix of the validation data. Default: 'val'. 101 test: The prefix of the test data. Default: 'test'. 102 Remaining keyword arguments: Passed to the splits method of 103 Dataset. 
104 """ 105 cls.dirname = cls.base_dirname.format(exts[0][1:], exts[1][1:]) 106 cls.urls = [cls.base_url.format(exts[0][1:], exts[1][1:], cls.dirname)] 107 check = os.path.join(root, cls.name, cls.dirname) 108 path = cls.download(root, check=check) 109 110 train = '.'.join([train, cls.dirname]) 111 val = '.'.join([val, cls.dirname]) 112 if test is not None: 113 test = '.'.join([test, cls.dirname]) 114 115 if not os.path.exists(os.path.join(path, train) + exts[0]): 116 cls.clean(path) 117 118 train_data = None if train is None else cls( 119 os.path.join(path, train), exts, fields, **kwargs) 120 val_data = None if val is None else cls( 121 os.path.join(path, val), exts, fields, **kwargs) 122 test_data = None if test is None else cls( 123 os.path.join(path, test), exts, fields, **kwargs) 124 return tuple(d for d in (train_data, val_data, test_data) 125 if d is not None) 126 127 @staticmethod 128 def clean(path): 129 for f_xml in glob.iglob(os.path.join(path, '*.xml')): 130 print(f_xml) 131 f_txt = os.path.splitext(f_xml)[0] 132 with open(f_txt, 'w') as fd_txt: 133 root = ET.parse(f_xml).getroot()[0] 134 for doc in root.findall('doc'): 135 for e in doc.findall('seg'): 136 fd_txt.write(e.text.strip() + '\n') 137 138 xml_tags = ['<url', '<keywords', '<talkid', '<description', 139 '<reviewer', '<translator', '<title', '<speaker'] 140 for f_orig in glob.iglob(os.path.join(path, 'train.tags*')): 141 print(f_orig) 142 f_txt = f_orig.replace('.tags', '') 143 with open(f_txt, 'w') as fd_txt, open(f_orig) as fd_orig: 144 for l in fd_orig: 145 if not any(tag in l for tag in xml_tags): 146 fd_txt.write(l.strip() + '\n') 147 [end of torchtext/datasets/translation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchtext/datasets/translation.py b/torchtext/datasets/translation.py --- a/torchtext/datasets/translation.py +++ b/torchtext/datasets/translation.py @@ -1,6 +1,7 @@ import os import xml.etree.ElementTree as ET import glob +import io from .. import data @@ -129,7 +130,7 @@ for f_xml in glob.iglob(os.path.join(path, '*.xml')): print(f_xml) f_txt = os.path.splitext(f_xml)[0] - with open(f_txt, 'w') as fd_txt: + with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt: root = ET.parse(f_xml).getroot()[0] for doc in root.findall('doc'): for e in doc.findall('seg'): @@ -140,7 +141,8 @@ for f_orig in glob.iglob(os.path.join(path, 'train.tags*')): print(f_orig) f_txt = f_orig.replace('.tags', '') - with open(f_txt, 'w') as fd_txt, open(f_orig) as fd_orig: + with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt, \ + io.open(f_orig, mode='w', encoding='utf-8') as fd_orig: for l in fd_orig: if not any(tag in l for tag in xml_tags): fd_txt.write(l.strip() + '\n')
{"golden_diff": "diff --git a/torchtext/datasets/translation.py b/torchtext/datasets/translation.py\n--- a/torchtext/datasets/translation.py\n+++ b/torchtext/datasets/translation.py\n@@ -1,6 +1,7 @@\n import os\n import xml.etree.ElementTree as ET\n import glob\n+import io\n \n from .. import data\n \n@@ -129,7 +130,7 @@\n for f_xml in glob.iglob(os.path.join(path, '*.xml')):\n print(f_xml)\n f_txt = os.path.splitext(f_xml)[0]\n- with open(f_txt, 'w') as fd_txt:\n+ with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt:\n root = ET.parse(f_xml).getroot()[0]\n for doc in root.findall('doc'):\n for e in doc.findall('seg'):\n@@ -140,7 +141,8 @@\n for f_orig in glob.iglob(os.path.join(path, 'train.tags*')):\n print(f_orig)\n f_txt = f_orig.replace('.tags', '')\n- with open(f_txt, 'w') as fd_txt, open(f_orig) as fd_orig:\n+ with io.open(f_txt, mode='w', encoding='utf-8') as fd_txt, \\\n+ io.open(f_orig, mode='w', encoding='utf-8') as fd_orig:\n for l in fd_orig:\n if not any(tag in l for tag in xml_tags):\n fd_txt.write(l.strip() + '\\n')\n", "issue": "ascii vs. utf-8 in torchtext/datasets/translation.py\n@nelson-liu: I incorrectly brought this up in pull #52, new issue here\r\n\r\nWhen trying to load splits for IWSLT (in french, german, etc...), the loading process would fail with an ascii encoding/decoding error:\r\n```\r\n.data/iwslt/de-en/IWSLT16.TED.dev2010.de-en.en.xml\r\n.data/iwslt/de-en/IWSLT16.TED.tst2013.de-en.de.xml\r\nTraceback (most recent call last):\r\n File \"test.py\", line 25, in <module>\r\n train, val, test = datasets.IWSLT.splits(exts=('.de', '.en'), fields=(DE, EN))\r\n File \"build/bdist.linux-x86_64/egg/torchtext/datasets/translation.py\", line 116, in splits\r\n File \"build/bdist.linux-x86_64/egg/torchtext/datasets/translation.py\", line 136, in clean\r\nUnicodeEncodeError: 'ascii' codec can't encode character u'\\xe4' in position 60: ordinal not in range(128)\r\n```\r\nThese are my library versions:\r\n```\r\nnumpy==1.13.3\r\nregex==2017.9.23\r\nspacy==1.9.0\r\ntorch==0.2.0.post4\r\ntorchtext==0.2.0b0 (just cloned a few minutes before error)\r\ntorchvision==0.1.9\r\n```\r\nHere is the code that I was using, from test/translation.py:\r\n```\r\nfrom torchtext import data\r\nfrom torchtext import datasets\r\n\r\nimport re\r\nimport spacy\r\nimport sys\r\n\r\nspacy_de = spacy.load('de')\r\nspacy_en = spacy.load('en')\r\n\r\nurl = re.compile('(<url>.*</url>)')\r\n\r\ndef tokenize_de(text):\r\n return [tok.text for tok in spacy_de.tokenizer(url.sub('@URL@', text))]\r\n\r\ndef tokenize_en(text):\r\n return [tok.text for tok in spacy_en.tokenizer(url.sub('@URL@', text))]\r\n\r\n# Testing IWSLT\r\nDE = data.Field(tokenize=tokenize_de)\r\nEN = data.Field(tokenize=tokenize_en)\r\ntrain, val, test = datasets.IWSLT.splits(exts=('.de', '.en'), fields=(DE, EN))\r\n```\r\nThe following fixed it for me, in torchtext/datasets/translation.py. Replace opens with io.opens specifying utf-8 for python2. 
It's worth noting that a friend with python3 did not have this problem.\r\n```\r\n127 @staticmethod\r\n128 def clean(path):\r\n129 for f_xml in glob.iglob(os.path.join(path, '*.xml')):\r\n130 print(f_xml)\r\n131 f_txt = os.path.splitext(f_xml)[0]\r\n132 import io\r\n133 with io.open(f_txt, mode=\"w\", encoding=\"utf-8\") as fd_txt: <--- INSERT\r\n134 #with open(f_txt, 'w') as fd_txt: <--- COMMENT\r\n135 root = ET.parse(f_xml).getroot()[0]\r\n136 for doc in root.findall('doc'):\r\n137 for e in doc.findall('seg'):\r\n138 fd_txt.write(e.text.strip() + '\\n')\r\n139 xml_tags = ['<url', '<keywords', '<talkid', '<description',\r\n140 '<reviewer', '<translator', '<title', '<speaker']\r\n141 for f_orig in glob.iglob(os.path.join(path, 'train.tags*')):\r\n142 print(f_orig)\r\n143 f_txt = f_orig.replace('.tags', '')\r\n144 with io.open(f_txt,mode='w',encoding='utf-8') as fd_txt, io.open(f_orig,mode='r',encoding='utf=8') as fd_orig: <--- INSERT\r\n145 #with open(f_txt, 'w') as fd_txt, open(f_orig) as fd_orig: <--- COMMENT\r\n146 for l in fd_orig:\r\n147 if not any(tag in l for tag in xml_tags):\r\n148 fd_txt.write(l.strip() + '\\n')\r\n```\r\n@jekbradbury, you were correct in pull #52 that I didn't need the middle block explicitly encoding/decoding (not seen here) since the file is already open as utf-8.\n", "before_files": [{"content": "import os\nimport xml.etree.ElementTree as ET\nimport glob\n\nfrom .. import data\n\n\nclass TranslationDataset(data.Dataset):\n \"\"\"Defines a dataset for machine translation.\"\"\"\n\n @staticmethod\n def sort_key(ex):\n return data.interleave_keys(len(ex.src), len(ex.trg))\n\n def __init__(self, path, exts, fields, **kwargs):\n \"\"\"Create a TranslationDataset given paths and fields.\n\n Arguments:\n path: Common prefix of paths to the data files for both languages.\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n Remaining keyword arguments: Passed to the constructor of\n data.Dataset.\n \"\"\"\n if not isinstance(fields[0], (tuple, list)):\n fields = [('src', fields[0]), ('trg', fields[1])]\n\n src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts)\n\n examples = []\n with open(src_path) as src_file, open(trg_path) as trg_file:\n for src_line, trg_line in zip(src_file, trg_file):\n src_line, trg_line = src_line.strip(), trg_line.strip()\n if src_line != '' and trg_line != '':\n examples.append(data.Example.fromlist(\n [src_line, trg_line], fields))\n\n super(TranslationDataset, self).__init__(examples, fields, **kwargs)\n\n\nclass Multi30k(TranslationDataset, data.Dataset):\n \"\"\"Defines a dataset for the multi-modal WMT 2016 task\"\"\"\n\n urls = ['http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz',\n 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz',\n 'https://staff.fnwi.uva.nl/d.elliott/wmt16/mmt16_task1_test.tgz']\n name = 'multi30k'\n dirname = ''\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train', val='val', test='test', **kwargs):\n \"\"\"Create dataset objects for splits of the Multi30k dataset.\n\n Arguments:\n\n root: directory containing Multi30k data\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. 
Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n path = cls.download(root)\n\n train_data = None if train is None else cls(\n os.path.join(path, train), exts, fields, **kwargs)\n val_data = None if val is None else cls(\n os.path.join(path, val), exts, fields, **kwargs)\n test_data = None if test is None else cls(\n os.path.join(path, test), exts, fields, **kwargs)\n return tuple(d for d in (train_data, val_data, test_data)\n if d is not None)\n\n\nclass IWSLT(TranslationDataset, data.Dataset):\n \"\"\"Defines a dataset for the IWSLT 2016 task\"\"\"\n\n base_url = 'https://wit3.fbk.eu/archive/2016-01//texts/{}/{}/{}.tgz'\n name = 'iwslt'\n base_dirname = '{}-{}'\n\n @classmethod\n def splits(cls, exts, fields, root='.data',\n train='train', val='IWSLT16.TED.tst2013',\n test='IWSLT16.TED.tst2014', **kwargs):\n \"\"\"Create dataset objects for splits of the IWSLT dataset.\n\n Arguments:\n\n root: directory containing Multi30k data\n exts: A tuple containing the extension to path for each language.\n fields: A tuple containing the fields that will be used for data\n in each language.\n train: The prefix of the train data. Default: 'train'.\n validation: The prefix of the validation data. Default: 'val'.\n test: The prefix of the test data. Default: 'test'.\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n cls.dirname = cls.base_dirname.format(exts[0][1:], exts[1][1:])\n cls.urls = [cls.base_url.format(exts[0][1:], exts[1][1:], cls.dirname)]\n check = os.path.join(root, cls.name, cls.dirname)\n path = cls.download(root, check=check)\n\n train = '.'.join([train, cls.dirname])\n val = '.'.join([val, cls.dirname])\n if test is not None:\n test = '.'.join([test, cls.dirname])\n\n if not os.path.exists(os.path.join(path, train) + exts[0]):\n cls.clean(path)\n\n train_data = None if train is None else cls(\n os.path.join(path, train), exts, fields, **kwargs)\n val_data = None if val is None else cls(\n os.path.join(path, val), exts, fields, **kwargs)\n test_data = None if test is None else cls(\n os.path.join(path, test), exts, fields, **kwargs)\n return tuple(d for d in (train_data, val_data, test_data)\n if d is not None)\n\n @staticmethod\n def clean(path):\n for f_xml in glob.iglob(os.path.join(path, '*.xml')):\n print(f_xml)\n f_txt = os.path.splitext(f_xml)[0]\n with open(f_txt, 'w') as fd_txt:\n root = ET.parse(f_xml).getroot()[0]\n for doc in root.findall('doc'):\n for e in doc.findall('seg'):\n fd_txt.write(e.text.strip() + '\\n')\n\n xml_tags = ['<url', '<keywords', '<talkid', '<description',\n '<reviewer', '<translator', '<title', '<speaker']\n for f_orig in glob.iglob(os.path.join(path, 'train.tags*')):\n print(f_orig)\n f_txt = f_orig.replace('.tags', '')\n with open(f_txt, 'w') as fd_txt, open(f_orig) as fd_orig:\n for l in fd_orig:\n if not any(tag in l for tag in xml_tags):\n fd_txt.write(l.strip() + '\\n')\n", "path": "torchtext/datasets/translation.py"}]}
3326
339
gh_patches_debug_2713
rasdani/github-patches
git_diff
wagtail__wagtail-9923
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Search on listing views doesn't work unless the `?q=` param exists in the URL <!-- Found a bug? Please fill out the sections below. 👍 --> ### Issue Summary Possible regression in https://github.com/wagtail/wagtail/pull/9768 The `URLSearchParams.get()` returns `null` if the param doesn't exist, so the following code: https://github.com/wagtail/wagtail/blob/a3f10acae17c892d843c419495e4204adb3ed991/client/src/entrypoints/admin/core.js#L270-L276 will crash during `currentQuery.trim()` when searching on the listing views (snippets, images, etc.) if the `?q=` param doesn't exist in the URL. Might be a good time to add `required=False` in here as well: https://github.com/wagtail/wagtail/blob/a3f10acae17c892d843c419495e4204adb3ed991/wagtail/admin/forms/search.py#L12 to remove this silly error when `q` is an empty string: <img width="473" alt="image" src="https://user-images.githubusercontent.com/6379424/213499685-ce37c064-2635-434f-952f-e85fae4ab9af.png"> <!-- A summary of the issue. --> ### Steps to Reproduce 1. Spin up bakerydemo 2. Open the images listing 3. Try to search </issue> <code> [start of wagtail/admin/forms/search.py] 1 from django import forms 2 from django.utils.translation import gettext as _ 3 from django.utils.translation import gettext_lazy 4 5 6 class SearchForm(forms.Form): 7 def __init__(self, *args, **kwargs): 8 placeholder = kwargs.pop("placeholder", _("Search")) 9 super().__init__(*args, **kwargs) 10 self.fields["q"].widget.attrs = {"placeholder": placeholder} 11 12 q = forms.CharField(label=gettext_lazy("Search term"), widget=forms.TextInput()) 13 [end of wagtail/admin/forms/search.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/admin/forms/search.py b/wagtail/admin/forms/search.py --- a/wagtail/admin/forms/search.py +++ b/wagtail/admin/forms/search.py @@ -9,4 +9,8 @@ super().__init__(*args, **kwargs) self.fields["q"].widget.attrs = {"placeholder": placeholder} - q = forms.CharField(label=gettext_lazy("Search term"), widget=forms.TextInput()) + q = forms.CharField( + label=gettext_lazy("Search term"), + widget=forms.TextInput(), + required=False, + )
{"golden_diff": "diff --git a/wagtail/admin/forms/search.py b/wagtail/admin/forms/search.py\n--- a/wagtail/admin/forms/search.py\n+++ b/wagtail/admin/forms/search.py\n@@ -9,4 +9,8 @@\n super().__init__(*args, **kwargs)\n self.fields[\"q\"].widget.attrs = {\"placeholder\": placeholder}\n \n- q = forms.CharField(label=gettext_lazy(\"Search term\"), widget=forms.TextInput())\n+ q = forms.CharField(\n+ label=gettext_lazy(\"Search term\"),\n+ widget=forms.TextInput(),\n+ required=False,\n+ )\n", "issue": "Search on listing views doesn't work unless the `?q=` param exists in the URL\n<!--\r\nFound a bug? Please fill out the sections below. \ud83d\udc4d\r\n-->\r\n\r\n### Issue Summary\r\n\r\nPossible regression in https://github.com/wagtail/wagtail/pull/9768\r\n\r\nThe `URLSearchParams.get()` returns `null` if the param doesn't exist, so the following code:\r\n\r\nhttps://github.com/wagtail/wagtail/blob/a3f10acae17c892d843c419495e4204adb3ed991/client/src/entrypoints/admin/core.js#L270-L276\r\n\r\nwill crash during `currentQuery.trim()` when searching on the listing views (snippets, images, etc.) if the `?q=` param doesn't exist in the URL.\r\n\r\nMight be a good time to add `required=False` in here as well:\r\n\r\nhttps://github.com/wagtail/wagtail/blob/a3f10acae17c892d843c419495e4204adb3ed991/wagtail/admin/forms/search.py#L12\r\n\r\nto remove this silly error when `q` is an empty string:\r\n\r\n<img width=\"473\" alt=\"image\" src=\"https://user-images.githubusercontent.com/6379424/213499685-ce37c064-2635-434f-952f-e85fae4ab9af.png\">\r\n\r\n<!--\r\nA summary of the issue.\r\n-->\r\n\r\n### Steps to Reproduce\r\n\r\n1. Spin up bakerydemo\r\n2. Open the images listing\r\n3. Try to search\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import gettext_lazy\n\n\nclass SearchForm(forms.Form):\n def __init__(self, *args, **kwargs):\n placeholder = kwargs.pop(\"placeholder\", _(\"Search\"))\n super().__init__(*args, **kwargs)\n self.fields[\"q\"].widget.attrs = {\"placeholder\": placeholder}\n\n q = forms.CharField(label=gettext_lazy(\"Search term\"), widget=forms.TextInput())\n", "path": "wagtail/admin/forms/search.py"}]}
1029
124
gh_patches_debug_5729
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-995
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Books with same title but different subtitle are hard to distinguish in TL **Is your feature request related to a problem? Please describe.** Sometimes a book series has the same title for all books but different subtitles. Example: [Saga: Book One](https://bookwyrm.social/book/39501), [Saga: Book Two](https://bookwyrm.social/book/39471), [Saga: Book Three](https://bookwyrm.social/book/39465). This leads to confusion when something about a book is in my TL and only the title is shown. **Describe the solution you'd like** Can we include the subtitle in statuses, if it isn't too long? Maybe if title + subtitle is < X characters, show subtitle? **Additional context** ![screenshot_2021-04-23T10:25:32](https://user-images.githubusercontent.com/3681516/115842957-a1797d80-a41e-11eb-8151-a60b55da64fd.png) The bottom is “Saga: Book One” and the top is “Saga: Book Two”. </issue> <code> [start of bookwyrm/templatetags/bookwyrm_tags.py] 1 """ template filters """ 2 from uuid import uuid4 3 4 from django import template, utils 5 from django.db.models import Avg 6 7 from bookwyrm import models, views 8 from bookwyrm.views.status import to_markdown 9 10 11 register = template.Library() 12 13 14 @register.filter(name="dict_key") 15 def dict_key(d, k): 16 """ Returns the given key from a dictionary. """ 17 return d.get(k) or 0 18 19 20 @register.filter(name="rating") 21 def get_rating(book, user): 22 """ get the overall rating of a book """ 23 queryset = views.helpers.privacy_filter( 24 user, models.Review.objects.filter(book=book) 25 ) 26 return queryset.aggregate(Avg("rating"))["rating__avg"] 27 28 29 @register.filter(name="user_rating") 30 def get_user_rating(book, user): 31 """ get a user's rating of a book """ 32 rating = ( 33 models.Review.objects.filter( 34 user=user, 35 book=book, 36 rating__isnull=False, 37 ) 38 .order_by("-published_date") 39 .first() 40 ) 41 if rating: 42 return rating.rating 43 return 0 44 45 46 @register.filter(name="username") 47 def get_user_identifier(user): 48 """ use localname for local users, username for remote """ 49 return user.localname if user.localname else user.username 50 51 52 @register.filter(name="notification_count") 53 def get_notification_count(user): 54 """ how many UNREAD notifications are there """ 55 return user.notification_set.filter(read=False).count() 56 57 58 @register.filter(name="replies") 59 def get_replies(status): 60 """ get all direct replies to a status """ 61 # TODO: this limit could cause problems 62 return models.Status.objects.filter( 63 reply_parent=status, 64 deleted=False, 65 ).select_subclasses()[:10] 66 67 68 @register.filter(name="parent") 69 def get_parent(status): 70 """ get the reply parent for a status """ 71 return ( 72 models.Status.objects.filter(id=status.reply_parent_id) 73 .select_subclasses() 74 .get() 75 ) 76 77 78 @register.filter(name="liked") 79 def get_user_liked(user, status): 80 """ did the given user fav a status? """ 81 try: 82 models.Favorite.objects.get(user=user, status=status) 83 return True 84 except models.Favorite.DoesNotExist: 85 return False 86 87 88 @register.filter(name="boosted") 89 def get_user_boosted(user, status): 90 """ did the given user fav a status? 
""" 91 return user.id in status.boosters.all().values_list("user", flat=True) 92 93 94 @register.filter(name="follow_request_exists") 95 def follow_request_exists(user, requester): 96 """ see if there is a pending follow request for a user """ 97 try: 98 models.UserFollowRequest.objects.filter( 99 user_subject=requester, 100 user_object=user, 101 ).get() 102 return True 103 except models.UserFollowRequest.DoesNotExist: 104 return False 105 106 107 @register.filter(name="boosted_status") 108 def get_boosted(boost): 109 """ load a boosted status. have to do this or it wont get foregin keys """ 110 return ( 111 models.Status.objects.select_subclasses() 112 .filter(id=boost.boosted_status.id) 113 .get() 114 ) 115 116 117 @register.filter(name="book_description") 118 def get_book_description(book): 119 """ use the work's text if the book doesn't have it """ 120 return book.description or book.parent_work.description 121 122 123 @register.filter(name="uuid") 124 def get_uuid(identifier): 125 """ for avoiding clashing ids when there are many forms """ 126 return "%s%s" % (identifier, uuid4()) 127 128 129 @register.filter(name="to_markdown") 130 def get_markdown(content): 131 """ convert markdown to html """ 132 if content: 133 return to_markdown(content) 134 return None 135 136 137 @register.filter(name="mentions") 138 def get_mentions(status, user): 139 """ people to @ in a reply: the parent and all mentions """ 140 mentions = set([status.user] + list(status.mention_users.all())) 141 return ( 142 " ".join("@" + get_user_identifier(m) for m in mentions if not m == user) + " " 143 ) 144 145 146 @register.filter(name="status_preview_name") 147 def get_status_preview_name(obj): 148 """ text snippet with book context for a status """ 149 name = obj.__class__.__name__.lower() 150 if name == "review": 151 return "%s of <em>%s</em>" % (name, obj.book.title) 152 if name == "comment": 153 return "%s on <em>%s</em>" % (name, obj.book.title) 154 if name == "quotation": 155 return "%s from <em>%s</em>" % (name, obj.book.title) 156 return name 157 158 159 @register.filter(name="next_shelf") 160 def get_next_shelf(current_shelf): 161 """ shelf you'd use to update reading progress """ 162 if current_shelf == "to-read": 163 return "reading" 164 if current_shelf == "reading": 165 return "read" 166 if current_shelf == "read": 167 return "read" 168 return "to-read" 169 170 171 @register.simple_tag(takes_context=False) 172 def related_status(notification): 173 """ for notifications """ 174 if not notification.related_status: 175 return None 176 if hasattr(notification.related_status, "quotation"): 177 return notification.related_status.quotation 178 if hasattr(notification.related_status, "review"): 179 return notification.related_status.review 180 if hasattr(notification.related_status, "comment"): 181 return notification.related_status.comment 182 return notification.related_status 183 184 185 @register.simple_tag(takes_context=True) 186 def active_shelf(context, book): 187 """ check what shelf a user has a book on, if any """ 188 shelf = models.ShelfBook.objects.filter( 189 shelf__user=context["request"].user, book__in=book.parent_work.editions.all() 190 ).first() 191 return shelf if shelf else {"book": book} 192 193 194 @register.simple_tag(takes_context=False) 195 def latest_read_through(book, user): 196 """ the most recent read activity """ 197 return ( 198 models.ReadThrough.objects.filter(user=user, book=book) 199 .order_by("-start_date") 200 .first() 201 ) 202 203 204 
@register.simple_tag(takes_context=False) 205 def active_read_through(book, user): 206 """ the most recent read activity """ 207 return ( 208 models.ReadThrough.objects.filter( 209 user=user, book=book, finish_date__isnull=True 210 ) 211 .order_by("-start_date") 212 .first() 213 ) 214 215 216 @register.simple_tag(takes_context=False) 217 def comparison_bool(str1, str2): 218 """ idk why I need to write a tag for this, it reutrns a bool """ 219 return str1 == str2 220 221 222 @register.simple_tag(takes_context=False) 223 def get_lang(): 224 """ get current language, strip to the first two letters """ 225 language = utils.translation.get_language() 226 return language[0 : language.find("-")] 227 [end of bookwyrm/templatetags/bookwyrm_tags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bookwyrm/templatetags/bookwyrm_tags.py b/bookwyrm/templatetags/bookwyrm_tags.py --- a/bookwyrm/templatetags/bookwyrm_tags.py +++ b/bookwyrm/templatetags/bookwyrm_tags.py @@ -168,6 +168,17 @@ return "to-read" [email protected](name="title") +def get_title(book): + """ display the subtitle if the title is short """ + if not book: + return "" + title = book.title + if len(title) < 6 and book.subtitle: + title = "{:s}: {:s}".format(title, book.subtitle) + return title + + @register.simple_tag(takes_context=False) def related_status(notification): """ for notifications """
{"golden_diff": "diff --git a/bookwyrm/templatetags/bookwyrm_tags.py b/bookwyrm/templatetags/bookwyrm_tags.py\n--- a/bookwyrm/templatetags/bookwyrm_tags.py\n+++ b/bookwyrm/templatetags/bookwyrm_tags.py\n@@ -168,6 +168,17 @@\n return \"to-read\"\n \n \[email protected](name=\"title\")\n+def get_title(book):\n+ \"\"\" display the subtitle if the title is short \"\"\"\n+ if not book:\n+ return \"\"\n+ title = book.title\n+ if len(title) < 6 and book.subtitle:\n+ title = \"{:s}: {:s}\".format(title, book.subtitle)\n+ return title\n+\n+\n @register.simple_tag(takes_context=False)\n def related_status(notification):\n \"\"\" for notifications \"\"\"\n", "issue": "Books with same title but different subtitle are hard to distinguish in TL\n**Is your feature request related to a problem? Please describe.**\r\nSometimes a book series has the same title for all books but different subtitles. Example: [Saga: Book One](https://bookwyrm.social/book/39501), [Saga: Book Two](https://bookwyrm.social/book/39471), [Saga: Book Three](https://bookwyrm.social/book/39465). This leads to confusion when something about a book is in my TL and only the title is shown.\r\n\r\n**Describe the solution you'd like**\r\nCan we include the subtitle in statuses, if it isn't too long? Maybe if title + subtitle is < X characters, show subtitle?\r\n\r\n**Additional context**\r\n![screenshot_2021-04-23T10:25:32](https://user-images.githubusercontent.com/3681516/115842957-a1797d80-a41e-11eb-8151-a60b55da64fd.png)\r\n The bottom is \u201cSaga: Book One\u201d and the top is \u201cSaga: Book Two\u201d.\r\n\n", "before_files": [{"content": "\"\"\" template filters \"\"\"\nfrom uuid import uuid4\n\nfrom django import template, utils\nfrom django.db.models import Avg\n\nfrom bookwyrm import models, views\nfrom bookwyrm.views.status import to_markdown\n\n\nregister = template.Library()\n\n\[email protected](name=\"dict_key\")\ndef dict_key(d, k):\n \"\"\" Returns the given key from a dictionary. 
\"\"\"\n return d.get(k) or 0\n\n\[email protected](name=\"rating\")\ndef get_rating(book, user):\n \"\"\" get the overall rating of a book \"\"\"\n queryset = views.helpers.privacy_filter(\n user, models.Review.objects.filter(book=book)\n )\n return queryset.aggregate(Avg(\"rating\"))[\"rating__avg\"]\n\n\[email protected](name=\"user_rating\")\ndef get_user_rating(book, user):\n \"\"\" get a user's rating of a book \"\"\"\n rating = (\n models.Review.objects.filter(\n user=user,\n book=book,\n rating__isnull=False,\n )\n .order_by(\"-published_date\")\n .first()\n )\n if rating:\n return rating.rating\n return 0\n\n\[email protected](name=\"username\")\ndef get_user_identifier(user):\n \"\"\" use localname for local users, username for remote \"\"\"\n return user.localname if user.localname else user.username\n\n\[email protected](name=\"notification_count\")\ndef get_notification_count(user):\n \"\"\" how many UNREAD notifications are there \"\"\"\n return user.notification_set.filter(read=False).count()\n\n\[email protected](name=\"replies\")\ndef get_replies(status):\n \"\"\" get all direct replies to a status \"\"\"\n # TODO: this limit could cause problems\n return models.Status.objects.filter(\n reply_parent=status,\n deleted=False,\n ).select_subclasses()[:10]\n\n\[email protected](name=\"parent\")\ndef get_parent(status):\n \"\"\" get the reply parent for a status \"\"\"\n return (\n models.Status.objects.filter(id=status.reply_parent_id)\n .select_subclasses()\n .get()\n )\n\n\[email protected](name=\"liked\")\ndef get_user_liked(user, status):\n \"\"\" did the given user fav a status? \"\"\"\n try:\n models.Favorite.objects.get(user=user, status=status)\n return True\n except models.Favorite.DoesNotExist:\n return False\n\n\[email protected](name=\"boosted\")\ndef get_user_boosted(user, status):\n \"\"\" did the given user fav a status? \"\"\"\n return user.id in status.boosters.all().values_list(\"user\", flat=True)\n\n\[email protected](name=\"follow_request_exists\")\ndef follow_request_exists(user, requester):\n \"\"\" see if there is a pending follow request for a user \"\"\"\n try:\n models.UserFollowRequest.objects.filter(\n user_subject=requester,\n user_object=user,\n ).get()\n return True\n except models.UserFollowRequest.DoesNotExist:\n return False\n\n\[email protected](name=\"boosted_status\")\ndef get_boosted(boost):\n \"\"\" load a boosted status. 
have to do this or it wont get foregin keys \"\"\"\n return (\n models.Status.objects.select_subclasses()\n .filter(id=boost.boosted_status.id)\n .get()\n )\n\n\[email protected](name=\"book_description\")\ndef get_book_description(book):\n \"\"\" use the work's text if the book doesn't have it \"\"\"\n return book.description or book.parent_work.description\n\n\[email protected](name=\"uuid\")\ndef get_uuid(identifier):\n \"\"\" for avoiding clashing ids when there are many forms \"\"\"\n return \"%s%s\" % (identifier, uuid4())\n\n\[email protected](name=\"to_markdown\")\ndef get_markdown(content):\n \"\"\" convert markdown to html \"\"\"\n if content:\n return to_markdown(content)\n return None\n\n\[email protected](name=\"mentions\")\ndef get_mentions(status, user):\n \"\"\" people to @ in a reply: the parent and all mentions \"\"\"\n mentions = set([status.user] + list(status.mention_users.all()))\n return (\n \" \".join(\"@\" + get_user_identifier(m) for m in mentions if not m == user) + \" \"\n )\n\n\[email protected](name=\"status_preview_name\")\ndef get_status_preview_name(obj):\n \"\"\" text snippet with book context for a status \"\"\"\n name = obj.__class__.__name__.lower()\n if name == \"review\":\n return \"%s of <em>%s</em>\" % (name, obj.book.title)\n if name == \"comment\":\n return \"%s on <em>%s</em>\" % (name, obj.book.title)\n if name == \"quotation\":\n return \"%s from <em>%s</em>\" % (name, obj.book.title)\n return name\n\n\[email protected](name=\"next_shelf\")\ndef get_next_shelf(current_shelf):\n \"\"\" shelf you'd use to update reading progress \"\"\"\n if current_shelf == \"to-read\":\n return \"reading\"\n if current_shelf == \"reading\":\n return \"read\"\n if current_shelf == \"read\":\n return \"read\"\n return \"to-read\"\n\n\[email protected]_tag(takes_context=False)\ndef related_status(notification):\n \"\"\" for notifications \"\"\"\n if not notification.related_status:\n return None\n if hasattr(notification.related_status, \"quotation\"):\n return notification.related_status.quotation\n if hasattr(notification.related_status, \"review\"):\n return notification.related_status.review\n if hasattr(notification.related_status, \"comment\"):\n return notification.related_status.comment\n return notification.related_status\n\n\[email protected]_tag(takes_context=True)\ndef active_shelf(context, book):\n \"\"\" check what shelf a user has a book on, if any \"\"\"\n shelf = models.ShelfBook.objects.filter(\n shelf__user=context[\"request\"].user, book__in=book.parent_work.editions.all()\n ).first()\n return shelf if shelf else {\"book\": book}\n\n\[email protected]_tag(takes_context=False)\ndef latest_read_through(book, user):\n \"\"\" the most recent read activity \"\"\"\n return (\n models.ReadThrough.objects.filter(user=user, book=book)\n .order_by(\"-start_date\")\n .first()\n )\n\n\[email protected]_tag(takes_context=False)\ndef active_read_through(book, user):\n \"\"\" the most recent read activity \"\"\"\n return (\n models.ReadThrough.objects.filter(\n user=user, book=book, finish_date__isnull=True\n )\n .order_by(\"-start_date\")\n .first()\n )\n\n\[email protected]_tag(takes_context=False)\ndef comparison_bool(str1, str2):\n \"\"\" idk why I need to write a tag for this, it reutrns a bool \"\"\"\n return str1 == str2\n\n\[email protected]_tag(takes_context=False)\ndef get_lang():\n \"\"\" get current language, strip to the first two letters \"\"\"\n language = utils.translation.get_language()\n return language[0 : language.find(\"-\")]\n", "path": 
"bookwyrm/templatetags/bookwyrm_tags.py"}]}
2862
185
gh_patches_debug_3183
rasdani/github-patches
git_diff
mlflow__mlflow-8499
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG] Missing `basic_auth.ini` in PyPI package ### Issues Policy acknowledgement - [X] I have read and agree to submit bug reports in accordance with the [issues policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md) ### Willingness to contribute Yes. I can contribute a fix for this bug independently. ### MLflow version - Client: 2.3.2 - Tracking server: 2.3.2 ### System information - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: - **Python version**: - **yarn version, if running the dev UI**: ### Describe the problem Freshly installed mlflow package from PyPI is missing `basic_auth.ini` and cannot launch basic auth app. ![image](https://github.com/mlflow/mlflow/assets/22888849/7667e308-982d-44ae-b4ce-a21aae387938) ![image](https://github.com/mlflow/mlflow/assets/22888849/ba7ac8da-ed4b-4318-8fd1-3c2e8bff9689) ### Tracking information <!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW --> ```shell REPLACE_ME ``` ### Code to reproduce issue <!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW --> ``` pip install mlflow mlflow ui --app-name=basic-auth ``` ### Stack trace <!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW --> ``` REPLACE_ME ``` ### Other info / logs <!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW --> ``` REPLACE_ME ``` ### What component(s) does this bug affect? - [ ] `area/artifacts`: Artifact stores and artifact logging - [ ] `area/build`: Build and test infrastructure for MLflow - [ ] `area/docs`: MLflow documentation pages - [ ] `area/examples`: Example code - [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry - [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors - [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates - [ ] `area/projects`: MLproject format, project running backends - [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs - [ ] `area/server-infra`: MLflow Tracking server backend - [X] `area/tracking`: Tracking Service, tracking client APIs, autologging ### What interface(s) does this bug affect? - [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server - [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models - [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry - [ ] `area/windows`: Windows support ### What language(s) does this bug affect? - [ ] `language/r`: R APIs and clients - [ ] `language/java`: Java APIs and clients - [ ] `language/new`: Proposals for new client languages ### What integration(s) does this bug affect? 
- [ ] `integrations/azure`: Azure and Azure ML integrations - [ ] `integrations/sagemaker`: SageMaker integrations - [ ] `integrations/databricks`: Databricks integrations </issue> <code> [start of setup.py] 1 import os 2 import logging 3 from importlib.machinery import SourceFileLoader 4 from setuptools import setup, find_packages, Command 5 6 _MLFLOW_SKINNY_ENV_VAR = "MLFLOW_SKINNY" 7 8 version = ( 9 SourceFileLoader("mlflow.version", os.path.join("mlflow", "version.py")).load_module().VERSION 10 ) 11 12 13 # Get a list of all files in the directory to include in our module 14 def package_files(directory): 15 paths = [] 16 for path, _, filenames in os.walk(directory): 17 for filename in filenames: 18 paths.append(os.path.join("..", path, filename)) 19 return paths 20 21 22 def is_comment_or_empty(line): 23 stripped = line.strip() 24 return stripped == "" or stripped.startswith("#") 25 26 27 def remove_comments_and_empty_lines(lines): 28 return [line for line in lines if not is_comment_or_empty(line)] 29 30 31 # Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build 32 # to include in the wheel, e.g. "../mlflow/server/js/build/index.html" 33 js_files = package_files("mlflow/server/js/build") 34 models_container_server_files = package_files("mlflow/models/container") 35 alembic_files = [ 36 "../mlflow/store/db_migrations/alembic.ini", 37 "../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini", 38 ] 39 extra_files = [ 40 "pypi_package_index.json", 41 "pyspark/ml/log_model_allowlist.txt", 42 ] 43 recipes_template_files = package_files("mlflow/recipes/resources") 44 recipes_files = package_files("mlflow/recipes/cards/templates") 45 46 47 """ 48 Minimal requirements for the skinny MLflow client which provides a limited 49 subset of functionality such as: RESTful client functionality for Tracking and 50 Model Registry, as well as support for Project execution against local backends 51 and Databricks. 52 """ 53 with open(os.path.join("requirements", "skinny-requirements.txt")) as f: 54 SKINNY_REQUIREMENTS = remove_comments_and_empty_lines(f.read().splitlines()) 55 56 57 """ 58 These are the core requirements for the complete MLflow platform, which augments 59 the skinny client functionality with support for running the MLflow Tracking 60 Server & UI. It also adds project backends such as Docker and Kubernetes among 61 other capabilities. 62 """ 63 with open(os.path.join("requirements", "core-requirements.txt")) as f: 64 CORE_REQUIREMENTS = SKINNY_REQUIREMENTS + remove_comments_and_empty_lines(f.read().splitlines()) 65 66 _is_mlflow_skinny = bool(os.environ.get(_MLFLOW_SKINNY_ENV_VAR)) 67 logging.debug("{} env var is set: {}".format(_MLFLOW_SKINNY_ENV_VAR, _is_mlflow_skinny)) 68 69 70 class ListDependencies(Command): 71 # `python setup.py <command name>` prints out "running <command name>" by default. 72 # This logging message must be hidden by specifying `--quiet` (or `-q`) when piping the output 73 # of this command to `pip install`. 
74 description = "List mlflow dependencies" 75 user_options = [ 76 ("skinny", None, "List mlflow-skinny dependencies"), 77 ] 78 79 def initialize_options(self): 80 self.skinny = False 81 82 def finalize_options(self): 83 pass 84 85 def run(self): 86 dependencies = SKINNY_REQUIREMENTS if self.skinny else CORE_REQUIREMENTS 87 print("\n".join(dependencies)) 88 89 90 MINIMUM_SUPPORTED_PYTHON_VERSION = "3.8" 91 92 93 class MinPythonVersion(Command): 94 description = "Print out the minimum supported Python version" 95 user_options = [] 96 97 def initialize_options(self): 98 pass 99 100 def finalize_options(self): 101 pass 102 103 def run(self): 104 print(MINIMUM_SUPPORTED_PYTHON_VERSION) 105 106 107 setup( 108 name="mlflow" if not _is_mlflow_skinny else "mlflow-skinny", 109 version=version, 110 packages=find_packages(exclude=["tests", "tests.*"]), 111 package_data={ 112 "mlflow": ( 113 js_files 114 + models_container_server_files 115 + alembic_files 116 + extra_files 117 + recipes_template_files 118 + recipes_files 119 ), 120 } 121 if not _is_mlflow_skinny 122 # include alembic files to enable usage of the skinny client with SQL databases 123 # if users install sqlalchemy and alembic independently 124 else {"mlflow": alembic_files + extra_files}, 125 install_requires=CORE_REQUIREMENTS if not _is_mlflow_skinny else SKINNY_REQUIREMENTS, 126 extras_require={ 127 "extras": [ 128 # Required to log artifacts and models to HDFS artifact locations 129 "pyarrow", 130 # Required to sign outgoing request with SigV4 signature 131 "requests-auth-aws-sigv4", 132 # Required to log artifacts and models to AWS S3 artifact locations 133 "boto3", 134 # Required to log artifacts and models to GCS artifact locations 135 "google-cloud-storage>=1.30.0", 136 "azureml-core>=1.2.0", 137 # Required to log artifacts to SFTP artifact locations 138 "pysftp", 139 # Required by the mlflow.projects module, when running projects against 140 # a remote Kubernetes cluster 141 "kubernetes", 142 # Required to serve models through MLServer 143 # NOTE: remove the upper version pin once protobuf is no longer pinned in mlserver 144 # Reference issue: https://github.com/SeldonIO/MLServer/issues/1089 145 "mlserver>=1.2.0,!=1.3.1", 146 "mlserver-mlflow>=1.2.0,!=1.3.1", 147 "virtualenv", 148 # Required for exporting metrics from the MLflow server to Prometheus 149 # as part of the MLflow server monitoring add-on 150 "prometheus-flask-exporter", 151 ], 152 "databricks": [ 153 # Required to write model artifacts to unity catalog locations 154 "azure-storage-file-datalake>12", 155 "google-cloud-storage>=1.30.0", 156 "boto3>1", 157 ], 158 "sqlserver": ["mlflow-dbstore"], 159 "aliyun-oss": ["aliyunstoreplugin"], 160 }, 161 entry_points=""" 162 [console_scripts] 163 mlflow=mlflow.cli:cli 164 165 [mlflow.app] 166 basic-auth=mlflow.server.auth:app 167 """, 168 cmdclass={ 169 "dependencies": ListDependencies, 170 "min_python_version": MinPythonVersion, 171 }, 172 zip_safe=False, 173 author="Databricks", 174 description="MLflow: A Platform for ML Development and Productionization", 175 long_description=open("README.rst").read() 176 if not _is_mlflow_skinny 177 else open("README_SKINNY.rst").read() + open("README.rst").read(), 178 long_description_content_type="text/x-rst", 179 license="Apache License 2.0", 180 classifiers=[ 181 "Intended Audience :: Developers", 182 f"Programming Language :: Python :: {MINIMUM_SUPPORTED_PYTHON_VERSION}", 183 ], 184 keywords="ml ai databricks", 185 url="https://mlflow.org/", 186 
python_requires=f">={MINIMUM_SUPPORTED_PYTHON_VERSION}", 187 project_urls={ 188 "Bug Tracker": "https://github.com/mlflow/mlflow/issues", 189 "Documentation": "https://mlflow.org/docs/latest/index.html", 190 "Source Code": "https://github.com/mlflow/mlflow", 191 }, 192 ) 193 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -39,6 +39,7 @@ extra_files = [ "pypi_package_index.json", "pyspark/ml/log_model_allowlist.txt", + "server/auth/basic_auth.ini", ] recipes_template_files = package_files("mlflow/recipes/resources") recipes_files = package_files("mlflow/recipes/cards/templates")
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -39,6 +39,7 @@\n extra_files = [\n \"pypi_package_index.json\",\n \"pyspark/ml/log_model_allowlist.txt\",\n+ \"server/auth/basic_auth.ini\",\n ]\n recipes_template_files = package_files(\"mlflow/recipes/resources\")\n recipes_files = package_files(\"mlflow/recipes/cards/templates\")\n", "issue": "[BUG] Missing `basic_auth.ini` in PyPI package\n### Issues Policy acknowledgement\n\n- [X] I have read and agree to submit bug reports in accordance with the [issues policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md)\n\n### Willingness to contribute\n\nYes. I can contribute a fix for this bug independently.\n\n### MLflow version\n\n- Client: 2.3.2\r\n- Tracking server: 2.3.2\r\n\n\n### System information\n\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:\r\n- **Python version**:\r\n- **yarn version, if running the dev UI**:\r\n\n\n### Describe the problem\n\nFreshly installed mlflow package from PyPI is missing `basic_auth.ini` and cannot launch basic auth app.\r\n\r\n![image](https://github.com/mlflow/mlflow/assets/22888849/7667e308-982d-44ae-b4ce-a21aae387938)\r\n\r\n![image](https://github.com/mlflow/mlflow/assets/22888849/ba7ac8da-ed4b-4318-8fd1-3c2e8bff9689)\r\n\n\n### Tracking information\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```shell\r\nREPLACE_ME\r\n```\r\n\n\n### Code to reproduce issue\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```\r\npip install mlflow\r\nmlflow ui --app-name=basic-auth\r\n```\r\n\n\n### Stack trace\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```\r\nREPLACE_ME\r\n```\r\n\n\n### Other info / logs\n\n<!-- PLEASE KEEP BACKTICKS AND CHECK PREVIEW -->\r\n```\r\nREPLACE_ME\r\n```\r\n\n\n### What component(s) does this bug affect?\n\n- [ ] `area/artifacts`: Artifact stores and artifact logging\n- [ ] `area/build`: Build and test infrastructure for MLflow\n- [ ] `area/docs`: MLflow documentation pages\n- [ ] `area/examples`: Example code\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\n- [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates\n- [ ] `area/projects`: MLproject format, project running backends\n- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs\n- [ ] `area/server-infra`: MLflow Tracking server backend\n- [X] `area/tracking`: Tracking Service, tracking client APIs, autologging\n\n### What interface(s) does this bug affect?\n\n- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\n- [ ] `area/windows`: Windows support\n\n### What language(s) does this bug affect?\n\n- [ ] `language/r`: R APIs and clients\n- [ ] `language/java`: Java APIs and clients\n- [ ] `language/new`: Proposals for new client languages\n\n### What integration(s) does this bug affect?\n\n- [ ] `integrations/azure`: Azure and Azure ML integrations\n- [ ] `integrations/sagemaker`: SageMaker integrations\n- [ ] `integrations/databricks`: Databricks integrations\n", "before_files": [{"content": "import os\nimport logging\nfrom importlib.machinery import SourceFileLoader\nfrom setuptools import setup, find_packages, 
Command\n\n_MLFLOW_SKINNY_ENV_VAR = \"MLFLOW_SKINNY\"\n\nversion = (\n SourceFileLoader(\"mlflow.version\", os.path.join(\"mlflow\", \"version.py\")).load_module().VERSION\n)\n\n\n# Get a list of all files in the directory to include in our module\ndef package_files(directory):\n paths = []\n for path, _, filenames in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join(\"..\", path, filename))\n return paths\n\n\ndef is_comment_or_empty(line):\n stripped = line.strip()\n return stripped == \"\" or stripped.startswith(\"#\")\n\n\ndef remove_comments_and_empty_lines(lines):\n return [line for line in lines if not is_comment_or_empty(line)]\n\n\n# Prints out a set of paths (relative to the mlflow/ directory) of files in mlflow/server/js/build\n# to include in the wheel, e.g. \"../mlflow/server/js/build/index.html\"\njs_files = package_files(\"mlflow/server/js/build\")\nmodels_container_server_files = package_files(\"mlflow/models/container\")\nalembic_files = [\n \"../mlflow/store/db_migrations/alembic.ini\",\n \"../mlflow/temporary_db_migrations_for_pre_1_users/alembic.ini\",\n]\nextra_files = [\n \"pypi_package_index.json\",\n \"pyspark/ml/log_model_allowlist.txt\",\n]\nrecipes_template_files = package_files(\"mlflow/recipes/resources\")\nrecipes_files = package_files(\"mlflow/recipes/cards/templates\")\n\n\n\"\"\"\nMinimal requirements for the skinny MLflow client which provides a limited\nsubset of functionality such as: RESTful client functionality for Tracking and\nModel Registry, as well as support for Project execution against local backends\nand Databricks.\n\"\"\"\nwith open(os.path.join(\"requirements\", \"skinny-requirements.txt\")) as f:\n SKINNY_REQUIREMENTS = remove_comments_and_empty_lines(f.read().splitlines())\n\n\n\"\"\"\nThese are the core requirements for the complete MLflow platform, which augments\nthe skinny client functionality with support for running the MLflow Tracking\nServer & UI. 
It also adds project backends such as Docker and Kubernetes among\nother capabilities.\n\"\"\"\nwith open(os.path.join(\"requirements\", \"core-requirements.txt\")) as f:\n CORE_REQUIREMENTS = SKINNY_REQUIREMENTS + remove_comments_and_empty_lines(f.read().splitlines())\n\n_is_mlflow_skinny = bool(os.environ.get(_MLFLOW_SKINNY_ENV_VAR))\nlogging.debug(\"{} env var is set: {}\".format(_MLFLOW_SKINNY_ENV_VAR, _is_mlflow_skinny))\n\n\nclass ListDependencies(Command):\n # `python setup.py <command name>` prints out \"running <command name>\" by default.\n # This logging message must be hidden by specifying `--quiet` (or `-q`) when piping the output\n # of this command to `pip install`.\n description = \"List mlflow dependencies\"\n user_options = [\n (\"skinny\", None, \"List mlflow-skinny dependencies\"),\n ]\n\n def initialize_options(self):\n self.skinny = False\n\n def finalize_options(self):\n pass\n\n def run(self):\n dependencies = SKINNY_REQUIREMENTS if self.skinny else CORE_REQUIREMENTS\n print(\"\\n\".join(dependencies))\n\n\nMINIMUM_SUPPORTED_PYTHON_VERSION = \"3.8\"\n\n\nclass MinPythonVersion(Command):\n description = \"Print out the minimum supported Python version\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n print(MINIMUM_SUPPORTED_PYTHON_VERSION)\n\n\nsetup(\n name=\"mlflow\" if not _is_mlflow_skinny else \"mlflow-skinny\",\n version=version,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n package_data={\n \"mlflow\": (\n js_files\n + models_container_server_files\n + alembic_files\n + extra_files\n + recipes_template_files\n + recipes_files\n ),\n }\n if not _is_mlflow_skinny\n # include alembic files to enable usage of the skinny client with SQL databases\n # if users install sqlalchemy and alembic independently\n else {\"mlflow\": alembic_files + extra_files},\n install_requires=CORE_REQUIREMENTS if not _is_mlflow_skinny else SKINNY_REQUIREMENTS,\n extras_require={\n \"extras\": [\n # Required to log artifacts and models to HDFS artifact locations\n \"pyarrow\",\n # Required to sign outgoing request with SigV4 signature\n \"requests-auth-aws-sigv4\",\n # Required to log artifacts and models to AWS S3 artifact locations\n \"boto3\",\n # Required to log artifacts and models to GCS artifact locations\n \"google-cloud-storage>=1.30.0\",\n \"azureml-core>=1.2.0\",\n # Required to log artifacts to SFTP artifact locations\n \"pysftp\",\n # Required by the mlflow.projects module, when running projects against\n # a remote Kubernetes cluster\n \"kubernetes\",\n # Required to serve models through MLServer\n # NOTE: remove the upper version pin once protobuf is no longer pinned in mlserver\n # Reference issue: https://github.com/SeldonIO/MLServer/issues/1089\n \"mlserver>=1.2.0,!=1.3.1\",\n \"mlserver-mlflow>=1.2.0,!=1.3.1\",\n \"virtualenv\",\n # Required for exporting metrics from the MLflow server to Prometheus\n # as part of the MLflow server monitoring add-on\n \"prometheus-flask-exporter\",\n ],\n \"databricks\": [\n # Required to write model artifacts to unity catalog locations\n \"azure-storage-file-datalake>12\",\n \"google-cloud-storage>=1.30.0\",\n \"boto3>1\",\n ],\n \"sqlserver\": [\"mlflow-dbstore\"],\n \"aliyun-oss\": [\"aliyunstoreplugin\"],\n },\n entry_points=\"\"\"\n [console_scripts]\n mlflow=mlflow.cli:cli\n\n [mlflow.app]\n basic-auth=mlflow.server.auth:app\n \"\"\",\n cmdclass={\n \"dependencies\": ListDependencies,\n \"min_python_version\": MinPythonVersion,\n },\n 
zip_safe=False,\n author=\"Databricks\",\n description=\"MLflow: A Platform for ML Development and Productionization\",\n long_description=open(\"README.rst\").read()\n if not _is_mlflow_skinny\n else open(\"README_SKINNY.rst\").read() + open(\"README.rst\").read(),\n long_description_content_type=\"text/x-rst\",\n license=\"Apache License 2.0\",\n classifiers=[\n \"Intended Audience :: Developers\",\n f\"Programming Language :: Python :: {MINIMUM_SUPPORTED_PYTHON_VERSION}\",\n ],\n keywords=\"ml ai databricks\",\n url=\"https://mlflow.org/\",\n python_requires=f\">={MINIMUM_SUPPORTED_PYTHON_VERSION}\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/mlflow/mlflow/issues\",\n \"Documentation\": \"https://mlflow.org/docs/latest/index.html\",\n \"Source Code\": \"https://github.com/mlflow/mlflow\",\n },\n)\n", "path": "setup.py"}]}
num_tokens_prompt: 3,388
num_tokens_diff: 92
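The golden diff above works through setuptools' `package_data` mechanism: only files listed there (or pulled in via `MANIFEST.in` with `include_package_data`) are copied into the built wheel, so a data file omitted from `extra_files` silently disappears from the PyPI package. A minimal sketch of that mechanism, using a hypothetical package name rather than MLflow's real layout:

```python
from setuptools import setup, find_packages

setup(
    name="mypkg",
    version="0.1",
    packages=find_packages(),
    # Non-Python files ship with the wheel only if they are named here
    # (paths are relative to the package directory). Forgetting an entry
    # drops the file from the built distribution without any warning --
    # the failure mode reported in the issue above.
    package_data={"mypkg": ["server/auth/basic_auth.ini", "templates/*.html"]},
)
```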
problem_id: gh_patches_debug_2985
source: rasdani/github-patches
task_type: git_diff
in_source_id: StackStorm__st2-4234
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Missing [workflow_engine] in st2.conf.sample ##### SUMMARY https://github.com/StackStorm/st2/blob/master/conf/st2.conf.sample is missing a new section for `[workflow_engine]` Also, shouldn't this section be named `[workflowengine]` to go along with the "style" of the other sections like `[resultstracker]` , `[garbagecollector]`, etc ##### ISSUE TYPE - Bug Report - Feature Idea ##### STACKSTORM VERSION 2.8 ##### EXPECTED RESULTS https://github.com/StackStorm/st2/blob/master/conf/st2.conf.sample contains a section for `[workflow_engine]` </issue> <code> [start of tools/config_gen.py] 1 #!/usr/bin/env python 2 # Licensed to the StackStorm, Inc ('StackStorm') under one or more 3 # contributor license agreements. See the NOTICE file distributed with 4 # this work for additional information regarding copyright ownership. 5 # The ASF licenses this file to You under the Apache License, Version 2.0 6 # (the "License"); you may not use this file except in compliance with 7 # the License. You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 from __future__ import absolute_import 18 import collections 19 import importlib 20 import six 21 import sys 22 import traceback 23 24 from oslo_config import cfg 25 26 27 CONFIGS = ['st2actions.config', 28 'st2actions.notifier.config', 29 'st2actions.resultstracker.config', 30 'st2api.config', 31 'st2stream.config', 32 'st2auth.config', 33 'st2common.config', 34 'st2exporter.config', 35 'st2reactor.rules.config', 36 'st2reactor.sensor.config', 37 'st2reactor.garbage_collector.config'] 38 39 SKIP_GROUPS = ['api_pecan', 'rbac', 'results_tracker'] 40 41 # We group auth options together to nake it a bit more clear what applies where 42 AUTH_OPTIONS = { 43 'common': [ 44 'enable', 45 'mode', 46 'logging', 47 'api_url', 48 'token_ttl', 49 'service_token_ttl', 50 'debug' 51 ], 52 'standalone': [ 53 'host', 54 'port', 55 'use_ssl', 56 'cert', 57 'key', 58 'backend', 59 'backend_kwargs' 60 ] 61 } 62 63 # Some of the config values change depenending on the environment where this script is ran so we 64 # set them to static values to ensure consistent and stable output 65 STATIC_OPTION_VALUES = { 66 'actionrunner': { 67 'virtualenv_binary': '/usr/bin/virtualenv', 68 'python_binary': '/usr/bin/python', 69 'python3_binary': '/usr/bin/python3' 70 }, 71 'webui': { 72 'webui_base_url': 'https://localhost' 73 } 74 } 75 76 COMMON_AUTH_OPTIONS_COMMENT = """ 77 # Common option - options below apply in both scenarios - when auth service is running as a WSGI 78 # service (e.g. under Apache or Nginx) and when it's running in the standalone mode. 79 """.strip() 80 81 STANDALONE_AUTH_OPTIONS_COMMENT = """ 82 # Standalone mode options - options below only apply when auth service is running in the standalone 83 # mode. 
84 """.strip() 85 86 87 def _import_config(config): 88 try: 89 return importlib.import_module(config) 90 except: 91 traceback.print_exc() 92 return None 93 94 95 def _read_current_config(opt_groups): 96 for k, v in six.iteritems(cfg.CONF._groups): 97 if k in SKIP_GROUPS: 98 continue 99 if k not in opt_groups: 100 opt_groups[k] = v 101 return opt_groups 102 103 104 def _clear_config(): 105 cfg.CONF.reset() 106 107 108 def _read_group(opt_group): 109 all_options = list(opt_group._opts.values()) 110 111 if opt_group.name == 'auth': 112 print(COMMON_AUTH_OPTIONS_COMMENT) 113 print('') 114 common_options = [option for option in all_options if option['opt'].name in 115 AUTH_OPTIONS['common']] 116 _print_options(opt_group=opt_group, options=common_options) 117 118 print('') 119 print(STANDALONE_AUTH_OPTIONS_COMMENT) 120 print('') 121 standalone_options = [option for option in all_options if option['opt'].name in 122 AUTH_OPTIONS['standalone']] 123 _print_options(opt_group=opt_group, options=standalone_options) 124 125 if len(common_options) + len(standalone_options) != len(all_options): 126 msg = ('Not all options are declared in AUTH_OPTIONS dict, please update it') 127 raise Exception(msg) 128 else: 129 options = all_options 130 _print_options(opt_group=opt_group, options=options) 131 132 133 def _read_groups(opt_groups): 134 opt_groups = collections.OrderedDict(sorted(opt_groups.items())) 135 for name, opt_group in six.iteritems(opt_groups): 136 print('[%s]' % name) 137 _read_group(opt_group) 138 print('') 139 140 141 def _print_options(opt_group, options): 142 for opt in options: 143 opt = opt['opt'] 144 145 # Special case for options which could change during this script run 146 static_option_value = STATIC_OPTION_VALUES.get(opt_group.name, {}).get(opt.name, None) 147 if static_option_value: 148 opt.default = static_option_value 149 150 # Special handling for list options 151 if isinstance(opt, cfg.ListOpt): 152 if opt.default: 153 value = ','.join(opt.default) 154 else: 155 value = '' 156 157 value += ' # comma separated list allowed here.' 158 else: 159 value = opt.default 160 161 print('# %s' % opt.help) 162 print('%s = %s' % (opt.name, value)) 163 164 165 def main(args): 166 opt_groups = {} 167 for config in CONFIGS: 168 mod = _import_config(config) 169 mod.register_opts() 170 _read_current_config(opt_groups) 171 _clear_config() 172 _read_groups(opt_groups) 173 174 175 if __name__ == '__main__': 176 main(sys.argv) 177 [end of tools/config_gen.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tools/config_gen.py b/tools/config_gen.py --- a/tools/config_gen.py +++ b/tools/config_gen.py @@ -27,6 +27,7 @@ CONFIGS = ['st2actions.config', 'st2actions.notifier.config', 'st2actions.resultstracker.config', + 'st2actions.workflows.config', 'st2api.config', 'st2stream.config', 'st2auth.config',
{"golden_diff": "diff --git a/tools/config_gen.py b/tools/config_gen.py\n--- a/tools/config_gen.py\n+++ b/tools/config_gen.py\n@@ -27,6 +27,7 @@\n CONFIGS = ['st2actions.config',\n 'st2actions.notifier.config',\n 'st2actions.resultstracker.config',\n+ 'st2actions.workflows.config',\n 'st2api.config',\n 'st2stream.config',\n 'st2auth.config',\n", "issue": "Missing [workflow_engine] in st2.conf.sample\n##### SUMMARY\r\n\r\nhttps://github.com/StackStorm/st2/blob/master/conf/st2.conf.sample is missing a new section for `[workflow_engine]`\r\n\r\nAlso, shouldn't this section be named `[workflowengine]` to go along with the \"style\" of the other sections like `[resultstracker]` , `[garbagecollector]`, etc\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n - Feature Idea\r\n\r\n##### STACKSTORM VERSION\r\n2.8\r\n\r\n##### EXPECTED RESULTS\r\nhttps://github.com/StackStorm/st2/blob/master/conf/st2.conf.sample contains a section for `[workflow_engine]`\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport collections\nimport importlib\nimport six\nimport sys\nimport traceback\n\nfrom oslo_config import cfg\n\n\nCONFIGS = ['st2actions.config',\n 'st2actions.notifier.config',\n 'st2actions.resultstracker.config',\n 'st2api.config',\n 'st2stream.config',\n 'st2auth.config',\n 'st2common.config',\n 'st2exporter.config',\n 'st2reactor.rules.config',\n 'st2reactor.sensor.config',\n 'st2reactor.garbage_collector.config']\n\nSKIP_GROUPS = ['api_pecan', 'rbac', 'results_tracker']\n\n# We group auth options together to nake it a bit more clear what applies where\nAUTH_OPTIONS = {\n 'common': [\n 'enable',\n 'mode',\n 'logging',\n 'api_url',\n 'token_ttl',\n 'service_token_ttl',\n 'debug'\n ],\n 'standalone': [\n 'host',\n 'port',\n 'use_ssl',\n 'cert',\n 'key',\n 'backend',\n 'backend_kwargs'\n ]\n}\n\n# Some of the config values change depenending on the environment where this script is ran so we\n# set them to static values to ensure consistent and stable output\nSTATIC_OPTION_VALUES = {\n 'actionrunner': {\n 'virtualenv_binary': '/usr/bin/virtualenv',\n 'python_binary': '/usr/bin/python',\n 'python3_binary': '/usr/bin/python3'\n },\n 'webui': {\n 'webui_base_url': 'https://localhost'\n }\n}\n\nCOMMON_AUTH_OPTIONS_COMMENT = \"\"\"\n# Common option - options below apply in both scenarios - when auth service is running as a WSGI\n# service (e.g. 
under Apache or Nginx) and when it's running in the standalone mode.\n\"\"\".strip()\n\nSTANDALONE_AUTH_OPTIONS_COMMENT = \"\"\"\n# Standalone mode options - options below only apply when auth service is running in the standalone\n# mode.\n\"\"\".strip()\n\n\ndef _import_config(config):\n try:\n return importlib.import_module(config)\n except:\n traceback.print_exc()\n return None\n\n\ndef _read_current_config(opt_groups):\n for k, v in six.iteritems(cfg.CONF._groups):\n if k in SKIP_GROUPS:\n continue\n if k not in opt_groups:\n opt_groups[k] = v\n return opt_groups\n\n\ndef _clear_config():\n cfg.CONF.reset()\n\n\ndef _read_group(opt_group):\n all_options = list(opt_group._opts.values())\n\n if opt_group.name == 'auth':\n print(COMMON_AUTH_OPTIONS_COMMENT)\n print('')\n common_options = [option for option in all_options if option['opt'].name in\n AUTH_OPTIONS['common']]\n _print_options(opt_group=opt_group, options=common_options)\n\n print('')\n print(STANDALONE_AUTH_OPTIONS_COMMENT)\n print('')\n standalone_options = [option for option in all_options if option['opt'].name in\n AUTH_OPTIONS['standalone']]\n _print_options(opt_group=opt_group, options=standalone_options)\n\n if len(common_options) + len(standalone_options) != len(all_options):\n msg = ('Not all options are declared in AUTH_OPTIONS dict, please update it')\n raise Exception(msg)\n else:\n options = all_options\n _print_options(opt_group=opt_group, options=options)\n\n\ndef _read_groups(opt_groups):\n opt_groups = collections.OrderedDict(sorted(opt_groups.items()))\n for name, opt_group in six.iteritems(opt_groups):\n print('[%s]' % name)\n _read_group(opt_group)\n print('')\n\n\ndef _print_options(opt_group, options):\n for opt in options:\n opt = opt['opt']\n\n # Special case for options which could change during this script run\n static_option_value = STATIC_OPTION_VALUES.get(opt_group.name, {}).get(opt.name, None)\n if static_option_value:\n opt.default = static_option_value\n\n # Special handling for list options\n if isinstance(opt, cfg.ListOpt):\n if opt.default:\n value = ','.join(opt.default)\n else:\n value = ''\n\n value += ' # comma separated list allowed here.'\n else:\n value = opt.default\n\n print('# %s' % opt.help)\n print('%s = %s' % (opt.name, value))\n\n\ndef main(args):\n opt_groups = {}\n for config in CONFIGS:\n mod = _import_config(config)\n mod.register_opts()\n _read_current_config(opt_groups)\n _clear_config()\n _read_groups(opt_groups)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n", "path": "tools/config_gen.py"}]}
num_tokens_prompt: 2,290
num_tokens_diff: 99
problem_id: gh_patches_debug_24013
source: rasdani/github-patches
task_type: git_diff
in_source_id: chainer__chainer-6581
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [document] The input argument name in `F.rrelu` is inconsistent https://docs.chainer.org/en/stable/reference/generated/chainer.functions.rrelu.html#chainer.functions.rrelu The slope for x < 0 is named `a` in the body text, but `r` is used for the input argument. > Randomized Leaky Rectified Liner Unit function. > This function is expressed as > f(x)=max(x,ax), > where a is a random number sampled from a uniform distribution U(l,u). > ... > ・r (N-dimensional array or None) – The r to be used for rrelu. The shape and dtype must be the same as x[0] and should be on the same device. If r is not specified or set to None, a r will be generated randomly according to the given l and u. If r is specified, l and u will be ignored. </issue> <code> [start of chainer/functions/activation/rrelu.py] 1 import numpy as np 2 3 import chainer 4 from chainer.backends import cuda 5 from chainer import function_node 6 from chainer.utils import argument 7 from chainer.utils import type_check 8 9 10 def _kern(): 11 return cuda.elementwise( 12 'T cond, T x, T slope', 'T y', 13 'y = cond >= 0 ? x : (T)(slope * x)', 'rrelu') 14 15 16 class RReLU(function_node.FunctionNode): 17 """Randomized Leaky rectifier unit.""" 18 19 def __init__(self, lower=1. / 8, upper=1. / 3, r=None): 20 if not 0.0 <= lower < 1.0: 21 raise ValueError('lower must be in the range [0, 1)') 22 if not 0.0 <= upper < 1.0: 23 raise ValueError('upper must be in the range [0, 1)') 24 if not lower < upper: 25 raise ValueError('lower must be less than upper') 26 self.lower = lower 27 self.upper = upper 28 self.r = r 29 30 def check_type_forward(self, in_types): 31 type_check.expect(in_types.size() == 1) 32 x_type, = in_types 33 type_check.expect(x_type.dtype.kind == 'f') 34 if self.r is not None: 35 type_check.expect(x_type.dtype == self.r.dtype) 36 type_check.expect(x_type.shape == self.r.shape) 37 38 def forward_cpu(self, inputs): 39 x, = inputs 40 if chainer.config.train: 41 if self.r is None: 42 self.r = np.random.uniform( 43 self.lower, self.upper, x.shape 44 ).astype(x.dtype, copy=False) 45 else: 46 self.r = np.full( 47 x.shape, (self.lower + self.upper) / 2, dtype=x.dtype) 48 y = np.where(x >= 0, x, x * self.r) 49 self.retain_outputs((0,)) 50 return y, 51 52 def forward_gpu(self, inputs): 53 x, = inputs 54 xp = cuda.cupy 55 if chainer.config.train: 56 if self.r is None: 57 self.r = xp.random.uniform( 58 self.lower, self.upper, x.shape 59 ).astype(x.dtype, copy=False) 60 else: 61 self.r = xp.full( 62 x.shape, (self.lower + self.upper) / 2, dtype=x.dtype) 63 y = _kern()(x, x, self.r) 64 self.retain_outputs((0,)) 65 return y, 66 67 def backward(self, indexes, grad_outputs): 68 y = self.get_retained_outputs()[0].data 69 return _RReLUGrad(y, self.r).apply(grad_outputs) 70 71 72 class _RReLUGrad(function_node.FunctionNode): 73 74 def __init__(self, y, r): 75 self.r = r 76 self.y = y 77 78 def forward_cpu(self, inputs): 79 gy, = inputs 80 gy = np.where(self.y >= 0, gy, gy * self.r) 81 return gy, 82 83 def forward_gpu(self, inputs): 84 gy, = inputs 85 gy = _kern()(self.y, gy, self.r) 86 return gy, 87 88 def backward(self, indexes, grad_outputs): 89 return _RReLUGrad(self.y, self.r).apply(grad_outputs) 90 91 92 def rrelu(x, l=1. / 8, u=1. / 3, **kwargs): 93 """rrelu(x, l=1. / 8, u=1. / 3, *, r=None, return_r=False) 94 95 Randomized Leaky Rectified Liner Unit function. 96 97 This function is expressed as 98 99 .. 
math:: f(x)=\\max(x, ax), 100 101 where :math:`a` is a random number sampled from a uniform distribution 102 :math:`U(l, u)`. 103 104 See: https://arxiv.org/pdf/1505.00853.pdf 105 106 Args: 107 x (:class:`~chainer.Variable` or :ref:`ndarray`): 108 Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array. 109 l (float): The lower bound of the uniform distribution. 110 u (float): The upper bound of the uniform distribution. 111 r (:ref:`ndarray` or None): 112 The r to be used for rrelu. 113 The shape and dtype must be the same as ``x[0]`` and should be on 114 the same device. 115 If ``r`` is not specified or set to ``None``, a ``r`` will be 116 generated randomly according to the given ``l`` and ``u``. 117 If ``r`` is specified, ``l`` and ``u`` will be ignored. 118 return_r (bool): 119 If ``True``, the r used for rrelu is returned altogether with 120 the output variable. 121 The returned ``r`` can latter be reused by passing it to ``r`` 122 argument. 123 124 Returns: 125 ~chainer.Variable or tuple: 126 When ``return_r`` is ``False`` (default), return the output 127 variable. Otherwise returnes the tuple of the output variable and 128 ``r`` (:ref:`ndarray`). The ``r`` will be on the same device as 129 the input. 130 A :math:`(s_1, s_2, ..., s_N)`-shaped float array. 131 132 .. admonition:: Example 133 134 >>> x = np.array([[-1, 0], [2, -3], [-2, 1]], np.float32) 135 >>> x 136 array([[-1., 0.], 137 [ 2., -3.], 138 [-2., 1.]], dtype=float32) 139 >>> F.rrelu(x).array # doctest: +SKIP 140 array([[-0.24850948, 0. ], 141 [ 2. , -0.50844127], 142 [-0.598535 , 1. ]], dtype=float32) 143 """ 144 r = None 145 return_r = False 146 if kwargs: 147 r, return_r = argument.parse_kwargs( 148 kwargs, ('r', r), ('return_r', r), 149 train='train argument is not supported anymore.' 150 'Use chainer.using_config') 151 152 func = RReLU(l, u, r) 153 out, = func.apply((x,)) 154 r = func.r 155 156 if return_r: 157 return out, r 158 return out 159 [end of chainer/functions/activation/rrelu.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/functions/activation/rrelu.py b/chainer/functions/activation/rrelu.py --- a/chainer/functions/activation/rrelu.py +++ b/chainer/functions/activation/rrelu.py @@ -96,12 +96,15 @@ This function is expressed as - .. math:: f(x)=\\max(x, ax), + .. math:: f(x)=\\max(x, rx), - where :math:`a` is a random number sampled from a uniform distribution + where :math:`r` is a random number sampled from a uniform distribution :math:`U(l, u)`. - See: https://arxiv.org/pdf/1505.00853.pdf + .. note:: + + The :math:`r` corresponds to :math:`a` in the original + paper (https://arxiv.org/pdf/1505.00853.pdf). Args: x (:class:`~chainer.Variable` or :ref:`ndarray`): @@ -112,7 +115,7 @@ The r to be used for rrelu. The shape and dtype must be the same as ``x[0]`` and should be on the same device. - If ``r`` is not specified or set to ``None``, a ``r`` will be + If ``r`` is not specified or set to ``None``, an ``r`` will be generated randomly according to the given ``l`` and ``u``. If ``r`` is specified, ``l`` and ``u`` will be ignored. return_r (bool):
{"golden_diff": "diff --git a/chainer/functions/activation/rrelu.py b/chainer/functions/activation/rrelu.py\n--- a/chainer/functions/activation/rrelu.py\n+++ b/chainer/functions/activation/rrelu.py\n@@ -96,12 +96,15 @@\n \n This function is expressed as\n \n- .. math:: f(x)=\\\\max(x, ax),\n+ .. math:: f(x)=\\\\max(x, rx),\n \n- where :math:`a` is a random number sampled from a uniform distribution\n+ where :math:`r` is a random number sampled from a uniform distribution\n :math:`U(l, u)`.\n \n- See: https://arxiv.org/pdf/1505.00853.pdf\n+ .. note::\n+\n+ The :math:`r` corresponds to :math:`a` in the original\n+ paper (https://arxiv.org/pdf/1505.00853.pdf).\n \n Args:\n x (:class:`~chainer.Variable` or :ref:`ndarray`):\n@@ -112,7 +115,7 @@\n The r to be used for rrelu.\n The shape and dtype must be the same as ``x[0]`` and should be on\n the same device.\n- If ``r`` is not specified or set to ``None``, a ``r`` will be\n+ If ``r`` is not specified or set to ``None``, an ``r`` will be\n generated randomly according to the given ``l`` and ``u``.\n If ``r`` is specified, ``l`` and ``u`` will be ignored.\n return_r (bool):\n", "issue": "[document] The input argument name in `F.rrelu` is inconsistent\nhttps://docs.chainer.org/en/stable/reference/generated/chainer.functions.rrelu.html#chainer.functions.rrelu\r\n\r\nThe slope for x < 0 is named `a` in the body text, but `r` is used for the input argument.\r\n\r\n> Randomized Leaky Rectified Liner Unit function.\r\n> This function is expressed as\r\n> f(x)=max(x,ax),\r\n> where a is a random number sampled from a uniform distribution U(l,u).\r\n> ...\r\n> \u30fbr (N-dimensional array or None) \u2013 The r to be used for rrelu. The shape and dtype must be the same as x[0] and should be on the same device. If r is not specified or set to None, a r will be generated randomly according to the given l and u. If r is specified, l and u will be ignored.\r\n\n", "before_files": [{"content": "import numpy as np\n\nimport chainer\nfrom chainer.backends import cuda\nfrom chainer import function_node\nfrom chainer.utils import argument\nfrom chainer.utils import type_check\n\n\ndef _kern():\n return cuda.elementwise(\n 'T cond, T x, T slope', 'T y',\n 'y = cond >= 0 ? x : (T)(slope * x)', 'rrelu')\n\n\nclass RReLU(function_node.FunctionNode):\n \"\"\"Randomized Leaky rectifier unit.\"\"\"\n\n def __init__(self, lower=1. / 8, upper=1. 
/ 3, r=None):\n if not 0.0 <= lower < 1.0:\n raise ValueError('lower must be in the range [0, 1)')\n if not 0.0 <= upper < 1.0:\n raise ValueError('upper must be in the range [0, 1)')\n if not lower < upper:\n raise ValueError('lower must be less than upper')\n self.lower = lower\n self.upper = upper\n self.r = r\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n type_check.expect(x_type.dtype.kind == 'f')\n if self.r is not None:\n type_check.expect(x_type.dtype == self.r.dtype)\n type_check.expect(x_type.shape == self.r.shape)\n\n def forward_cpu(self, inputs):\n x, = inputs\n if chainer.config.train:\n if self.r is None:\n self.r = np.random.uniform(\n self.lower, self.upper, x.shape\n ).astype(x.dtype, copy=False)\n else:\n self.r = np.full(\n x.shape, (self.lower + self.upper) / 2, dtype=x.dtype)\n y = np.where(x >= 0, x, x * self.r)\n self.retain_outputs((0,))\n return y,\n\n def forward_gpu(self, inputs):\n x, = inputs\n xp = cuda.cupy\n if chainer.config.train:\n if self.r is None:\n self.r = xp.random.uniform(\n self.lower, self.upper, x.shape\n ).astype(x.dtype, copy=False)\n else:\n self.r = xp.full(\n x.shape, (self.lower + self.upper) / 2, dtype=x.dtype)\n y = _kern()(x, x, self.r)\n self.retain_outputs((0,))\n return y,\n\n def backward(self, indexes, grad_outputs):\n y = self.get_retained_outputs()[0].data\n return _RReLUGrad(y, self.r).apply(grad_outputs)\n\n\nclass _RReLUGrad(function_node.FunctionNode):\n\n def __init__(self, y, r):\n self.r = r\n self.y = y\n\n def forward_cpu(self, inputs):\n gy, = inputs\n gy = np.where(self.y >= 0, gy, gy * self.r)\n return gy,\n\n def forward_gpu(self, inputs):\n gy, = inputs\n gy = _kern()(self.y, gy, self.r)\n return gy,\n\n def backward(self, indexes, grad_outputs):\n return _RReLUGrad(self.y, self.r).apply(grad_outputs)\n\n\ndef rrelu(x, l=1. / 8, u=1. / 3, **kwargs):\n \"\"\"rrelu(x, l=1. / 8, u=1. / 3, *, r=None, return_r=False)\n\n Randomized Leaky Rectified Liner Unit function.\n\n This function is expressed as\n\n .. math:: f(x)=\\\\max(x, ax),\n\n where :math:`a` is a random number sampled from a uniform distribution\n :math:`U(l, u)`.\n\n See: https://arxiv.org/pdf/1505.00853.pdf\n\n Args:\n x (:class:`~chainer.Variable` or :ref:`ndarray`):\n Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.\n l (float): The lower bound of the uniform distribution.\n u (float): The upper bound of the uniform distribution.\n r (:ref:`ndarray` or None):\n The r to be used for rrelu.\n The shape and dtype must be the same as ``x[0]`` and should be on\n the same device.\n If ``r`` is not specified or set to ``None``, a ``r`` will be\n generated randomly according to the given ``l`` and ``u``.\n If ``r`` is specified, ``l`` and ``u`` will be ignored.\n return_r (bool):\n If ``True``, the r used for rrelu is returned altogether with\n the output variable.\n The returned ``r`` can latter be reused by passing it to ``r``\n argument.\n\n Returns:\n ~chainer.Variable or tuple:\n When ``return_r`` is ``False`` (default), return the output\n variable. Otherwise returnes the tuple of the output variable and\n ``r`` (:ref:`ndarray`). The ``r`` will be on the same device as\n the input.\n A :math:`(s_1, s_2, ..., s_N)`-shaped float array.\n\n .. admonition:: Example\n\n >>> x = np.array([[-1, 0], [2, -3], [-2, 1]], np.float32)\n >>> x\n array([[-1., 0.],\n [ 2., -3.],\n [-2., 1.]], dtype=float32)\n >>> F.rrelu(x).array # doctest: +SKIP\n array([[-0.24850948, 0. ],\n [ 2. 
, -0.50844127],\n [-0.598535 , 1. ]], dtype=float32)\n \"\"\"\n r = None\n return_r = False\n if kwargs:\n r, return_r = argument.parse_kwargs(\n kwargs, ('r', r), ('return_r', r),\n train='train argument is not supported anymore.'\n 'Use chainer.using_config')\n\n func = RReLU(l, u, r)\n out, = func.apply((x,))\n r = func.r\n\n if return_r:\n return out, r\n return out\n", "path": "chainer/functions/activation/rrelu.py"}]}
num_tokens_prompt: 2,541
num_tokens_diff: 370
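Since the whole report turns on what the `r` argument denotes, a short usage sketch may help; it relies only on the signature shown in the code above (`l`, `u`, `r`, `return_r`) and assumes Chainer and NumPy are installed:

```python
import numpy as np
import chainer
import chainer.functions as F

x = np.array([[-1.0, 2.0], [-3.0, 0.5]], np.float32)

with chainer.using_config('train', True):
    # First call samples the per-element slopes r ~ U(l, u) and returns them.
    y1, r = F.rrelu(x, l=1. / 8, u=1. / 3, return_r=True)
    # Feeding the same r back reproduces exactly the same randomized output,
    # so l and u are ignored on the second call.
    y2 = F.rrelu(x, r=r)

assert np.allclose(y1.array, y2.array)
```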
problem_id: gh_patches_debug_30559
source: rasdani/github-patches
task_type: git_diff
in_source_id: Gallopsled__pwntools-343
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pwnlib.term.text throws exceptions on attribute accesses ``` >>> import pwnlib >>> pwnlib.term.text.__lol__ Traceback (most recent call last): File "<stdin>", line 1, in <module> File "pwnlib/term/text.py", line 99, in __getattr__ init += self._fg_color(c()) File "pwnlib/term/text.py", line 93, in c return self._colors[c] + bright KeyError: '' ``` </issue> <code> [start of pwnlib/term/text.py] 1 import types, sys, functools 2 from . import termcap 3 4 def eval_when(when): 5 if isinstance(when, file) or \ 6 when in ('always', 'never', 'auto', sys.stderr, sys.stdout): 7 if when == 'always': 8 return True 9 elif when == 'never': 10 return False 11 elif when == 'auto': 12 return sys.stdout.isatty() 13 else: 14 return when.isatty() 15 else: 16 raise ValueError('text.when: must be a file-object or "always", "never" or "auto"') 17 18 class Module(types.ModuleType): 19 def __init__(self): 20 self.__file__ = __file__ 21 self.__name__ = __name__ 22 self.num_colors = termcap.get('colors', default = 8) 23 self.has_bright = self.num_colors >= 16 24 self.has_gray = self.has_bright 25 self.when = 'auto' 26 self._colors = { 27 'black': 0, 28 'red': 1, 29 'green': 2, 30 'yellow': 3, 31 'blue': 4, 32 'magenta': 5, 33 'cyan': 6, 34 'white': 7, 35 } 36 self._reset = '\x1b[m' 37 self._attributes = {} 38 for x, y in [('italic' , 'sitm'), 39 ('bold' , 'bold'), 40 ('underline', 'smul'), 41 ('reverse' , 'rev')]: 42 s = termcap.get(y) 43 self._attributes[x] = s 44 self._cache = {} 45 46 @property 47 def when(self): 48 return self._when 49 50 @when.setter 51 def when(self, val): 52 self._when = eval_when(val) 53 54 def _fg_color(self, c): 55 return termcap.get('setaf', c) or termcap.get('setf', c) 56 57 def _bg_color(self, c): 58 return termcap.get('setab', c) or termcap.get('setb', c) 59 60 def _decorator(self, desc, init): 61 def f(self, s, when = None): 62 if when: 63 if eval_when(when): 64 return init + s + self._reset 65 else: 66 return s 67 else: 68 if self.when: 69 return init + s + self._reset 70 else: 71 return s 72 setattr(Module, desc, f) 73 return functools.partial(f, self) 74 75 def __getattr__(self, desc): 76 ds = desc.replace('gray', 'bright_black').split('_') 77 init = '' 78 while ds: 79 d = ds[0] 80 try: 81 init += self._attributes[d] 82 ds.pop(0) 83 except KeyError: 84 break 85 def c(): 86 bright = 0 87 c = ds.pop(0) 88 if c == 'bright': 89 c = ds.pop(0) 90 if self.has_bright: 91 bright = 8 92 return self._colors[c] + bright 93 if ds: 94 if ds[0] == 'on': 95 ds.pop(0) 96 init += self._bg_color(c()) 97 else: 98 init += self._fg_color(c()) 99 if len(ds): 100 assert ds.pop(0) == 'on' 101 init += self._bg_color(c()) 102 return self._decorator(desc, init) 103 104 def get(self, desc): 105 return self.__getattr__(desc) 106 107 tether = sys.modules[__name__] 108 sys.modules[__name__] = Module() 109 [end of pwnlib/term/text.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwnlib/term/text.py b/pwnlib/term/text.py --- a/pwnlib/term/text.py +++ b/pwnlib/term/text.py @@ -73,33 +73,36 @@ return functools.partial(f, self) def __getattr__(self, desc): - ds = desc.replace('gray', 'bright_black').split('_') - init = '' - while ds: - d = ds[0] - try: - init += self._attributes[d] - ds.pop(0) - except KeyError: - break - def c(): - bright = 0 - c = ds.pop(0) - if c == 'bright': + try: + ds = desc.replace('gray', 'bright_black').split('_') + init = '' + while ds: + d = ds[0] + try: + init += self._attributes[d] + ds.pop(0) + except KeyError: + break + def c(): + bright = 0 c = ds.pop(0) - if self.has_bright: - bright = 8 - return self._colors[c] + bright - if ds: - if ds[0] == 'on': - ds.pop(0) - init += self._bg_color(c()) - else: - init += self._fg_color(c()) - if len(ds): - assert ds.pop(0) == 'on' + if c == 'bright': + c = ds.pop(0) + if self.has_bright: + bright = 8 + return self._colors[c] + bright + if ds: + if ds[0] == 'on': + ds.pop(0) init += self._bg_color(c()) - return self._decorator(desc, init) + else: + init += self._fg_color(c()) + if len(ds): + assert ds.pop(0) == 'on' + init += self._bg_color(c()) + return self._decorator(desc, init) + except (IndexError, KeyError): + raise AttributeError("'module' object has no attribute %r" % desc) def get(self, desc): return self.__getattr__(desc)
{"golden_diff": "diff --git a/pwnlib/term/text.py b/pwnlib/term/text.py\n--- a/pwnlib/term/text.py\n+++ b/pwnlib/term/text.py\n@@ -73,33 +73,36 @@\n return functools.partial(f, self)\n \n def __getattr__(self, desc):\n- ds = desc.replace('gray', 'bright_black').split('_')\n- init = ''\n- while ds:\n- d = ds[0]\n- try:\n- init += self._attributes[d]\n- ds.pop(0)\n- except KeyError:\n- break\n- def c():\n- bright = 0\n- c = ds.pop(0)\n- if c == 'bright':\n+ try:\n+ ds = desc.replace('gray', 'bright_black').split('_')\n+ init = ''\n+ while ds:\n+ d = ds[0]\n+ try:\n+ init += self._attributes[d]\n+ ds.pop(0)\n+ except KeyError:\n+ break\n+ def c():\n+ bright = 0\n c = ds.pop(0)\n- if self.has_bright:\n- bright = 8\n- return self._colors[c] + bright\n- if ds:\n- if ds[0] == 'on':\n- ds.pop(0)\n- init += self._bg_color(c())\n- else:\n- init += self._fg_color(c())\n- if len(ds):\n- assert ds.pop(0) == 'on'\n+ if c == 'bright':\n+ c = ds.pop(0)\n+ if self.has_bright:\n+ bright = 8\n+ return self._colors[c] + bright\n+ if ds:\n+ if ds[0] == 'on':\n+ ds.pop(0)\n init += self._bg_color(c())\n- return self._decorator(desc, init)\n+ else:\n+ init += self._fg_color(c())\n+ if len(ds):\n+ assert ds.pop(0) == 'on'\n+ init += self._bg_color(c())\n+ return self._decorator(desc, init)\n+ except (IndexError, KeyError):\n+ raise AttributeError(\"'module' object has no attribute %r\" % desc)\n \n def get(self, desc):\n return self.__getattr__(desc)\n", "issue": "pwnlib.term.text throws exceptions on attribute accesses\n```\n>>> import pwnlib\n>>> pwnlib.term.text.__lol__\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"pwnlib/term/text.py\", line 99, in __getattr__\n init += self._fg_color(c())\n File \"pwnlib/term/text.py\", line 93, in c\n return self._colors[c] + bright\nKeyError: ''\n```\n\n", "before_files": [{"content": "import types, sys, functools\nfrom . 
import termcap\n\ndef eval_when(when):\n if isinstance(when, file) or \\\n when in ('always', 'never', 'auto', sys.stderr, sys.stdout):\n if when == 'always':\n return True\n elif when == 'never':\n return False\n elif when == 'auto':\n return sys.stdout.isatty()\n else:\n return when.isatty()\n else:\n raise ValueError('text.when: must be a file-object or \"always\", \"never\" or \"auto\"')\n\nclass Module(types.ModuleType):\n def __init__(self):\n self.__file__ = __file__\n self.__name__ = __name__\n self.num_colors = termcap.get('colors', default = 8)\n self.has_bright = self.num_colors >= 16\n self.has_gray = self.has_bright\n self.when = 'auto'\n self._colors = {\n 'black': 0,\n 'red': 1,\n 'green': 2,\n 'yellow': 3,\n 'blue': 4,\n 'magenta': 5,\n 'cyan': 6,\n 'white': 7,\n }\n self._reset = '\\x1b[m'\n self._attributes = {}\n for x, y in [('italic' , 'sitm'),\n ('bold' , 'bold'),\n ('underline', 'smul'),\n ('reverse' , 'rev')]:\n s = termcap.get(y)\n self._attributes[x] = s\n self._cache = {}\n\n @property\n def when(self):\n return self._when\n\n @when.setter\n def when(self, val):\n self._when = eval_when(val)\n\n def _fg_color(self, c):\n return termcap.get('setaf', c) or termcap.get('setf', c)\n\n def _bg_color(self, c):\n return termcap.get('setab', c) or termcap.get('setb', c)\n\n def _decorator(self, desc, init):\n def f(self, s, when = None):\n if when:\n if eval_when(when):\n return init + s + self._reset\n else:\n return s\n else:\n if self.when:\n return init + s + self._reset\n else:\n return s\n setattr(Module, desc, f)\n return functools.partial(f, self)\n\n def __getattr__(self, desc):\n ds = desc.replace('gray', 'bright_black').split('_')\n init = ''\n while ds:\n d = ds[0]\n try:\n init += self._attributes[d]\n ds.pop(0)\n except KeyError:\n break\n def c():\n bright = 0\n c = ds.pop(0)\n if c == 'bright':\n c = ds.pop(0)\n if self.has_bright:\n bright = 8\n return self._colors[c] + bright\n if ds:\n if ds[0] == 'on':\n ds.pop(0)\n init += self._bg_color(c())\n else:\n init += self._fg_color(c())\n if len(ds):\n assert ds.pop(0) == 'on'\n init += self._bg_color(c())\n return self._decorator(desc, init)\n\n def get(self, desc):\n return self.__getattr__(desc)\n\ntether = sys.modules[__name__]\nsys.modules[__name__] = Module()\n", "path": "pwnlib/term/text.py"}]}
1,640
523
gh_patches_debug_26790
rasdani/github-patches
git_diff
sql-machine-learning__elasticdl-1101
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Python version support I run the unit tests in a local environment, with Python 3.7.4 https://github.com/wangkuiyi/elasticdl/blob/424a9b45999fe77eb671d7cfe86d7e22130f263b/elasticdl/python/common/odps_recordio_conversion_utils.py#L52-L56 I find that it throws an exception. This is an interface change from Python 3.6 to Python 3.7 Please refer to https://docs.python.org/3/library/collections.html#collections.namedtuple ``` Changed in version 3.1: Added support for rename. Changed in version 3.6: The verbose and rename parameters became keyword-only arguments. Changed in version 3.6: Added the module parameter. Changed in version 3.7: Remove the verbose parameter and the _source attribute. Changed in version 3.7: Added the defaults parameter and the _field_defaults attribute. ``` Here is just a record. </issue> <code> [start of elasticdl/python/common/odps_recordio_conversion_utils.py] 1 import os 2 from collections import OrderedDict, namedtuple 3 from datetime import datetime as _datetime 4 5 import numpy as np 6 import recordio 7 import tensorflow as tf 8 from odps.types import ( 9 Bigint, 10 Binary, 11 Boolean, 12 Datetime, 13 Double, 14 Float, 15 Int, 16 Smallint, 17 String, 18 Tinyint, 19 ) 20 21 22 def _infer_primitive_data_type(value): 23 """Infer the primitive data type for an ODPS value.""" 24 integer_builtins = (int, np.integer) 25 float_builtins = (float, np.float) 26 27 odps_primitive_to_builtin_types = OrderedDict( 28 ( 29 (Bigint(), integer_builtins), 30 (Tinyint(), integer_builtins), 31 (Smallint(), integer_builtins), 32 (Int(), integer_builtins), 33 (Double(), float_builtins), 34 (Float(), float_builtins), 35 (String(), (str, bytes)), 36 (Binary(), bytes), 37 (Datetime(), _datetime), 38 (Boolean(), bool), 39 ) 40 ) 41 for data_type, builtin_types in odps_primitive_to_builtin_types.items(): 42 if isinstance(value, builtin_types): 43 return builtin_types 44 else: 45 return None 46 47 48 def _find_features_indices( 49 features_list, int_features, float_features, bytes_features 50 ): 51 """Finds the indices for different types of features.""" 52 FeatureIndices = namedtuple( 53 "FeatureIndices", 54 ["int_features", "float_features", "bytes_features"], 55 verbose=False, 56 ) 57 int_features_indices = [features_list.index(key) for key in int_features] 58 float_features_indices = [ 59 features_list.index(key) for key in float_features 60 ] 61 bytes_features_indices = [ 62 features_list.index(key) for key in bytes_features 63 ] 64 return FeatureIndices( 65 int_features_indices, float_features_indices, bytes_features_indices 66 ) 67 68 69 def _parse_row_to_example(record, features_list, feature_indices): 70 """ 71 Parses one row (a flat list or one-dimensional numpy array) 72 to a TensorFlow Example. 
73 """ 74 if isinstance(record, list): 75 record = np.array(record, dtype=object) 76 77 example = tf.train.Example() 78 # Note: these cannot be constructed dynamically since 79 # we cannot assign a value to an embedded message 80 # field in protobuf 81 for feature_ind in feature_indices.int_features: 82 example.features.feature[ 83 features_list[feature_ind] 84 ].int64_list.value.append( 85 int(_maybe_encode_unicode_string(record[feature_ind]) or 0) 86 ) 87 for feature_ind in feature_indices.float_features: 88 example.features.feature[ 89 features_list[feature_ind] 90 ].float_list.value.append( 91 float(_maybe_encode_unicode_string(record[feature_ind]) or 0.0) 92 ) 93 for feature_ind in feature_indices.bytes_features: 94 example.features.feature[ 95 features_list[feature_ind] 96 ].bytes_list.value.append( 97 _maybe_encode_unicode_string(record[feature_ind]) 98 ) 99 return example 100 101 102 def _maybe_encode_unicode_string(record): 103 """Encodes unicode strings if needed.""" 104 if isinstance(record, str): 105 record = bytes(record, "utf-8").strip() 106 return record 107 108 109 def _find_feature_indices_from_record(record): 110 """Find the indices of different feature types.""" 111 feature_types = [type(value) for value in record] 112 FeatureIndices = namedtuple( 113 "FeatureIndices", 114 ["int_features", "float_features", "bytes_features"], 115 verbose=False, 116 ) 117 return FeatureIndices( 118 [i for i, x in enumerate(feature_types) if x == int], 119 [i for i, x in enumerate(feature_types) if x == float], 120 [i for i, x in enumerate(feature_types) if x == str], 121 ) 122 123 124 def write_recordio_shards_from_iterator( 125 records_iter, features_list, output_dir, records_per_shard 126 ): 127 """Writes RecordIO files from Python iterator of numpy arrays.""" 128 # Take the first record batch to check whether it contains multiple items 129 first_record_batch = next(records_iter) 130 is_first_record_batch_consumed = False 131 is_multi_items_per_batch = any( 132 isinstance(i, list) for i in first_record_batch 133 ) 134 135 # Find the features of different types that will be used 136 # in `_parse_row_to_example()` later 137 record = ( 138 first_record_batch[0] 139 if is_multi_items_per_batch 140 else first_record_batch 141 ) 142 feature_indices = _find_feature_indices_from_record(record) 143 144 writer = None 145 rows_written = 0 146 shards_written = 0 147 while True: 148 try: 149 # Make sure to consume the first record batch 150 if is_first_record_batch_consumed: 151 record_batch = next(records_iter) 152 else: 153 record_batch = first_record_batch 154 is_first_record_batch_consumed = True 155 if not is_multi_items_per_batch: 156 record_batch = [record_batch] 157 158 # Write each record in the batch to a RecordIO shard 159 for record in record_batch: 160 # Initialize the writer for the new shard 161 if rows_written % records_per_shard == 0: 162 if writer is not None: 163 writer.close() 164 shard_file_path = os.path.join( 165 output_dir, "data-%05d" % shards_written 166 ) 167 writer = recordio.Writer(shard_file_path) 168 shards_written += 1 169 170 writer.write( 171 _parse_row_to_example( 172 record, features_list, feature_indices 173 ).SerializeToString() 174 ) 175 rows_written += 1 176 except StopIteration: 177 break 178 179 writer.close() 180 [end of elasticdl/python/common/odps_recordio_conversion_utils.py] [start of setup.py] 1 from setuptools import setup, find_packages 2 3 4 with open('elasticdl/requirements.txt') as f: 5 requirements = f.read().splitlines() 6 7 setup( 8 
name="elasticdl", 9 version="0.0.1", 10 description="A Kubernetes-native Elastic Deep Learning Framework", 11 author="Ant Financial", 12 url="https://github.com/wangkuiyi/elasticdl", 13 install_requires=requirements, 14 packages=find_packages(exclude=["*test*"]), 15 package_data={"": ["proto/elasticdl.proto", "docker/*", "Makefile"]}, 16 entry_points={ 17 "console_scripts": ["elasticdl=elasticdl.python.elasticdl.client:main"] 18 }, 19 ) 20 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/elasticdl/python/common/odps_recordio_conversion_utils.py b/elasticdl/python/common/odps_recordio_conversion_utils.py --- a/elasticdl/python/common/odps_recordio_conversion_utils.py +++ b/elasticdl/python/common/odps_recordio_conversion_utils.py @@ -50,9 +50,7 @@ ): """Finds the indices for different types of features.""" FeatureIndices = namedtuple( - "FeatureIndices", - ["int_features", "float_features", "bytes_features"], - verbose=False, + "FeatureIndices", ["int_features", "float_features", "bytes_features"] ) int_features_indices = [features_list.index(key) for key in int_features] float_features_indices = [ @@ -110,9 +108,7 @@ """Find the indices of different feature types.""" feature_types = [type(value) for value in record] FeatureIndices = namedtuple( - "FeatureIndices", - ["int_features", "float_features", "bytes_features"], - verbose=False, + "FeatureIndices", ["int_features", "float_features", "bytes_features"] ) return FeatureIndices( [i for i, x in enumerate(feature_types) if x == int], diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,7 +1,6 @@ -from setuptools import setup, find_packages +from setuptools import find_packages, setup - -with open('elasticdl/requirements.txt') as f: +with open("elasticdl/requirements.txt") as f: requirements = f.read().splitlines() setup(
{"golden_diff": "diff --git a/elasticdl/python/common/odps_recordio_conversion_utils.py b/elasticdl/python/common/odps_recordio_conversion_utils.py\n--- a/elasticdl/python/common/odps_recordio_conversion_utils.py\n+++ b/elasticdl/python/common/odps_recordio_conversion_utils.py\n@@ -50,9 +50,7 @@\n ):\n \"\"\"Finds the indices for different types of features.\"\"\"\n FeatureIndices = namedtuple(\n- \"FeatureIndices\",\n- [\"int_features\", \"float_features\", \"bytes_features\"],\n- verbose=False,\n+ \"FeatureIndices\", [\"int_features\", \"float_features\", \"bytes_features\"]\n )\n int_features_indices = [features_list.index(key) for key in int_features]\n float_features_indices = [\n@@ -110,9 +108,7 @@\n \"\"\"Find the indices of different feature types.\"\"\"\n feature_types = [type(value) for value in record]\n FeatureIndices = namedtuple(\n- \"FeatureIndices\",\n- [\"int_features\", \"float_features\", \"bytes_features\"],\n- verbose=False,\n+ \"FeatureIndices\", [\"int_features\", \"float_features\", \"bytes_features\"]\n )\n return FeatureIndices(\n [i for i, x in enumerate(feature_types) if x == int],\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,7 +1,6 @@\n-from setuptools import setup, find_packages\n+from setuptools import find_packages, setup\n \n-\n-with open('elasticdl/requirements.txt') as f:\n+with open(\"elasticdl/requirements.txt\") as f:\n requirements = f.read().splitlines()\n \n setup(\n", "issue": "Python version support\nI run the unit tests in a local environment, with Python 3.7.4\r\n\r\nhttps://github.com/wangkuiyi/elasticdl/blob/424a9b45999fe77eb671d7cfe86d7e22130f263b/elasticdl/python/common/odps_recordio_conversion_utils.py#L52-L56\r\n\r\nI find that it throws an exception. This is an interface change from Python 3.6 to Python 3.7\r\nPlease refer to https://docs.python.org/3/library/collections.html#collections.namedtuple\r\n\r\n```\r\nChanged in version 3.1: Added support for rename.\r\n\r\nChanged in version 3.6: The verbose and rename parameters became keyword-only arguments.\r\n\r\nChanged in version 3.6: Added the module parameter.\r\n\r\nChanged in version 3.7: Remove the verbose parameter and the _source attribute.\r\n\r\nChanged in version 3.7: Added the defaults parameter and the _field_defaults attribute.\r\n```\r\n\r\nHere is just a record.\n", "before_files": [{"content": "import os\nfrom collections import OrderedDict, namedtuple\nfrom datetime import datetime as _datetime\n\nimport numpy as np\nimport recordio\nimport tensorflow as tf\nfrom odps.types import (\n Bigint,\n Binary,\n Boolean,\n Datetime,\n Double,\n Float,\n Int,\n Smallint,\n String,\n Tinyint,\n)\n\n\ndef _infer_primitive_data_type(value):\n \"\"\"Infer the primitive data type for an ODPS value.\"\"\"\n integer_builtins = (int, np.integer)\n float_builtins = (float, np.float)\n\n odps_primitive_to_builtin_types = OrderedDict(\n (\n (Bigint(), integer_builtins),\n (Tinyint(), integer_builtins),\n (Smallint(), integer_builtins),\n (Int(), integer_builtins),\n (Double(), float_builtins),\n (Float(), float_builtins),\n (String(), (str, bytes)),\n (Binary(), bytes),\n (Datetime(), _datetime),\n (Boolean(), bool),\n )\n )\n for data_type, builtin_types in odps_primitive_to_builtin_types.items():\n if isinstance(value, builtin_types):\n return builtin_types\n else:\n return None\n\n\ndef _find_features_indices(\n features_list, int_features, float_features, bytes_features\n):\n \"\"\"Finds the indices for different types of features.\"\"\"\n FeatureIndices = 
namedtuple(\n \"FeatureIndices\",\n [\"int_features\", \"float_features\", \"bytes_features\"],\n verbose=False,\n )\n int_features_indices = [features_list.index(key) for key in int_features]\n float_features_indices = [\n features_list.index(key) for key in float_features\n ]\n bytes_features_indices = [\n features_list.index(key) for key in bytes_features\n ]\n return FeatureIndices(\n int_features_indices, float_features_indices, bytes_features_indices\n )\n\n\ndef _parse_row_to_example(record, features_list, feature_indices):\n \"\"\"\n Parses one row (a flat list or one-dimensional numpy array)\n to a TensorFlow Example.\n \"\"\"\n if isinstance(record, list):\n record = np.array(record, dtype=object)\n\n example = tf.train.Example()\n # Note: these cannot be constructed dynamically since\n # we cannot assign a value to an embedded message\n # field in protobuf\n for feature_ind in feature_indices.int_features:\n example.features.feature[\n features_list[feature_ind]\n ].int64_list.value.append(\n int(_maybe_encode_unicode_string(record[feature_ind]) or 0)\n )\n for feature_ind in feature_indices.float_features:\n example.features.feature[\n features_list[feature_ind]\n ].float_list.value.append(\n float(_maybe_encode_unicode_string(record[feature_ind]) or 0.0)\n )\n for feature_ind in feature_indices.bytes_features:\n example.features.feature[\n features_list[feature_ind]\n ].bytes_list.value.append(\n _maybe_encode_unicode_string(record[feature_ind])\n )\n return example\n\n\ndef _maybe_encode_unicode_string(record):\n \"\"\"Encodes unicode strings if needed.\"\"\"\n if isinstance(record, str):\n record = bytes(record, \"utf-8\").strip()\n return record\n\n\ndef _find_feature_indices_from_record(record):\n \"\"\"Find the indices of different feature types.\"\"\"\n feature_types = [type(value) for value in record]\n FeatureIndices = namedtuple(\n \"FeatureIndices\",\n [\"int_features\", \"float_features\", \"bytes_features\"],\n verbose=False,\n )\n return FeatureIndices(\n [i for i, x in enumerate(feature_types) if x == int],\n [i for i, x in enumerate(feature_types) if x == float],\n [i for i, x in enumerate(feature_types) if x == str],\n )\n\n\ndef write_recordio_shards_from_iterator(\n records_iter, features_list, output_dir, records_per_shard\n):\n \"\"\"Writes RecordIO files from Python iterator of numpy arrays.\"\"\"\n # Take the first record batch to check whether it contains multiple items\n first_record_batch = next(records_iter)\n is_first_record_batch_consumed = False\n is_multi_items_per_batch = any(\n isinstance(i, list) for i in first_record_batch\n )\n\n # Find the features of different types that will be used\n # in `_parse_row_to_example()` later\n record = (\n first_record_batch[0]\n if is_multi_items_per_batch\n else first_record_batch\n )\n feature_indices = _find_feature_indices_from_record(record)\n\n writer = None\n rows_written = 0\n shards_written = 0\n while True:\n try:\n # Make sure to consume the first record batch\n if is_first_record_batch_consumed:\n record_batch = next(records_iter)\n else:\n record_batch = first_record_batch\n is_first_record_batch_consumed = True\n if not is_multi_items_per_batch:\n record_batch = [record_batch]\n\n # Write each record in the batch to a RecordIO shard\n for record in record_batch:\n # Initialize the writer for the new shard\n if rows_written % records_per_shard == 0:\n if writer is not None:\n writer.close()\n shard_file_path = os.path.join(\n output_dir, \"data-%05d\" % shards_written\n )\n writer = 
recordio.Writer(shard_file_path)\n shards_written += 1\n\n writer.write(\n _parse_row_to_example(\n record, features_list, feature_indices\n ).SerializeToString()\n )\n rows_written += 1\n except StopIteration:\n break\n\n writer.close()\n", "path": "elasticdl/python/common/odps_recordio_conversion_utils.py"}, {"content": "from setuptools import setup, find_packages\n\n\nwith open('elasticdl/requirements.txt') as f:\n requirements = f.read().splitlines()\n\nsetup(\n name=\"elasticdl\",\n version=\"0.0.1\",\n description=\"A Kubernetes-native Elastic Deep Learning Framework\",\n author=\"Ant Financial\",\n url=\"https://github.com/wangkuiyi/elasticdl\",\n install_requires=requirements,\n packages=find_packages(exclude=[\"*test*\"]),\n package_data={\"\": [\"proto/elasticdl.proto\", \"docker/*\", \"Makefile\"]},\n entry_points={\n \"console_scripts\": [\"elasticdl=elasticdl.python.elasticdl.client:main\"]\n },\n)\n", "path": "setup.py"}]}
2,609
358
gh_patches_debug_11165
rasdani/github-patches
git_diff
safe-global__safe-config-service-1092
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `/api/v1/chains` returns unexpected nativeCurrency.logoUri path **Describe the bug** I deployed a project locally using the code from `https://github.com/safe-global/safe-infrastructure`. However, after adding a configuration for a chain in cfg, the `/v1/chains` endpoint of cgw started returning a 500 error. After enabling debug logs, I found that cgw returned the following error: ```json { "build_number": "edc276d", "level": "error", "message": { "chain_id": null, "client_ip": "172.70.223.68", "detail": "[\n {\n \"validation\": \"url\",\n \"code\": \"invalid_string\",\n \"message\": \"Invalid url\",\n \"path\": [\n \"nativeCurrency\",\n \"logoUri\"\n ]\n }\n]", "method": "GET", "path": "/v1/chains", "response_time_ms": 107.19920100085437, "route": "/v1/chains", "safe_app_user_agent": null, "status_code": 500 }, "request_id": "1e37d27b-02b6-468a-a290-b6e06f7bc911", "timestamp": "2024-03-29T07:54:29.220Z", "version": "v1.31.0" } ```` After investigating further, I found that the `nativeCurrency.logoUri` in the response data from cfg is not an absolute path: ```json { "count": 1, "next": null, "previous": null, "results": [ { ... "nativeCurrency": { "name": "ETH", "symbol": "ETH", "decimals": 18, "logoUri": "/media/chains/100/currency_logo_3fdtfcf.png" }, ... ] } ``` **To Reproduce** Steps to reproduce the behavior: - Clone the code from `https://github.com/safe-global/safe-infrastructure` - Modify the MEDIA_URL in cfg.env to `/media/` **Expected behavior** The `nativeCurrency.logoUri` in the response data from cfg should be `https://config.xxx.xyz/media/chains/100/currency_logo_3fdtfcf.png` instead of `/media/chains/100/currency_logo_3fdtfcf.png`. **Environment (please complete the following information):** - Version of safe-config-service: latest(possibly [v2.75.0]) **Additional context** I made the following edits to the `src/chains/serializers.py` file from ```python @staticmethod @swagger_serializer_method(serializer_or_field=CurrencySerializer) # type: ignore[misc] def get_native_currency(obj: Chain) -> ReturnDict: return CurrencySerializer(obj).data ``` to ```python @swagger_serializer_method(serializer_or_field=CurrencySerializer) # type: ignore[misc] def get_native_currency(self, obj: Chain) -> ReturnDict: request = self.context.get('request') logo_uri = obj.currency_logo_uri dict = CurrencySerializer(obj).data dict.update({"logo_uri":request.build_absolute_uri(logo_uri)}) return dict ``` After applying the temporary workaround, cfg is able to return the expected data. However, since I'm not familiar with Python and Django, this was just a temporary solution on my part. I hope that you can find a better way to fix this issue. 
</issue> <code> [start of src/chains/serializers.py] 1 from abc import abstractmethod 2 3 from drf_yasg.utils import swagger_serializer_method 4 from gnosis.eth.django.serializers import EthereumAddressField 5 from rest_framework import serializers 6 from rest_framework.exceptions import APIException 7 from rest_framework.utils.serializer_helpers import ReturnDict 8 9 from .models import Chain, Feature, GasPrice, Wallet 10 11 12 class GasPriceOracleSerializer(serializers.Serializer[GasPrice]): 13 type = serializers.ReadOnlyField(default="oracle") 14 uri = serializers.URLField(source="oracle_uri") 15 gas_parameter = serializers.CharField(source="oracle_parameter") 16 gwei_factor = serializers.DecimalField(max_digits=19, decimal_places=9) 17 18 19 class GasPriceFixedSerializer(serializers.Serializer[GasPrice]): 20 type = serializers.ReadOnlyField(default="fixed") 21 wei_value = serializers.CharField(source="fixed_wei_value") 22 23 24 class GasPriceFixed1559Serializer(serializers.Serializer[GasPrice]): 25 type = serializers.ReadOnlyField(default="fixed1559") 26 max_fee_per_gas = serializers.CharField() 27 max_priority_fee_per_gas = serializers.CharField() 28 29 30 class GasPriceSerializer(serializers.Serializer[GasPrice]): 31 def to_representation(self, instance: GasPrice) -> ReturnDict: 32 if ( 33 instance.oracle_uri 34 and instance.fixed_wei_value is None 35 and instance.max_fee_per_gas is None 36 and instance.max_priority_fee_per_gas is None 37 ): 38 return GasPriceOracleSerializer(instance).data 39 elif ( 40 instance.fixed_wei_value 41 and instance.oracle_uri is None 42 and instance.max_fee_per_gas is None 43 and instance.max_priority_fee_per_gas is None 44 ): 45 return GasPriceFixedSerializer(instance).data 46 elif ( 47 instance.max_fee_per_gas 48 and instance.max_priority_fee_per_gas 49 and instance.oracle_uri is None 50 and instance.fixed_wei_value is None 51 ): 52 return GasPriceFixed1559Serializer(instance).data 53 else: 54 raise APIException( 55 f"The gas price oracle or a fixed gas price was not provided for chain {instance.chain}" 56 ) 57 58 59 class ThemeSerializer(serializers.Serializer[Chain]): 60 text_color = serializers.CharField(source="theme_text_color") 61 background_color = serializers.CharField(source="theme_background_color") 62 63 64 class CurrencySerializer(serializers.Serializer[Chain]): 65 name = serializers.CharField(source="currency_name") 66 symbol = serializers.CharField(source="currency_symbol") 67 decimals = serializers.IntegerField(source="currency_decimals") 68 logo_uri = serializers.ImageField(use_url=True, source="currency_logo_uri") 69 70 71 class BaseRpcUriSerializer(serializers.Serializer[Chain]): 72 authentication = serializers.SerializerMethodField() 73 value = serializers.SerializerMethodField(method_name="get_rpc_value") 74 75 @abstractmethod 76 def get_authentication(self, obj: Chain) -> str: # pragma: no cover 77 pass 78 79 @abstractmethod 80 def get_rpc_value(self, obj: Chain) -> str: # pragma: no cover 81 pass 82 83 84 class RpcUriSerializer(BaseRpcUriSerializer): 85 def get_authentication(self, obj: Chain) -> str: 86 return obj.rpc_authentication 87 88 def get_rpc_value(self, obj: Chain) -> str: 89 return obj.rpc_uri 90 91 92 class SafeAppsRpcUriSerializer(BaseRpcUriSerializer): 93 def get_authentication(self, obj: Chain) -> str: 94 return obj.safe_apps_rpc_authentication 95 96 def get_rpc_value(self, obj: Chain) -> str: 97 return obj.safe_apps_rpc_uri 98 99 100 class PublicRpcUriSerializer(BaseRpcUriSerializer): 101 def 
get_authentication(self, obj: Chain) -> str: 102 return obj.public_rpc_authentication 103 104 def get_rpc_value(self, obj: Chain) -> str: 105 return obj.public_rpc_uri 106 107 108 class BlockExplorerUriTemplateSerializer(serializers.Serializer[Chain]): 109 address = serializers.URLField(source="block_explorer_uri_address_template") 110 tx_hash = serializers.URLField(source="block_explorer_uri_tx_hash_template") 111 api = serializers.URLField(source="block_explorer_uri_api_template") 112 113 114 class FeatureSerializer(serializers.Serializer[Feature]): 115 class Meta: 116 ref_name = "chains.serializers.FeatureSerializer" 117 118 @staticmethod 119 def to_representation(instance: Feature) -> str: 120 return instance.key 121 122 123 class WalletSerializer(serializers.Serializer[Wallet]): 124 @staticmethod 125 def to_representation(instance: Wallet) -> str: 126 return instance.key 127 128 129 class ChainSerializer(serializers.ModelSerializer[Chain]): 130 chain_id = serializers.CharField(source="id") 131 chain_name = serializers.CharField(source="name") 132 short_name = serializers.CharField() 133 chain_logo_uri = serializers.ImageField(use_url=True) 134 rpc_uri = serializers.SerializerMethodField() 135 safe_apps_rpc_uri = serializers.SerializerMethodField() 136 public_rpc_uri = serializers.SerializerMethodField() 137 block_explorer_uri_template = serializers.SerializerMethodField() 138 native_currency = serializers.SerializerMethodField() 139 transaction_service = serializers.URLField( 140 source="transaction_service_uri", default=None 141 ) 142 vpc_transaction_service = serializers.URLField(source="vpc_transaction_service_uri") 143 theme = serializers.SerializerMethodField() 144 gas_price = serializers.SerializerMethodField() 145 ens_registry_address = EthereumAddressField() 146 disabled_wallets = serializers.SerializerMethodField() 147 features = serializers.SerializerMethodField() 148 149 class Meta: 150 model = Chain 151 fields = [ 152 "chain_id", 153 "chain_name", 154 "short_name", 155 "description", 156 "chain_logo_uri", 157 "l2", 158 "is_testnet", 159 "rpc_uri", 160 "safe_apps_rpc_uri", 161 "public_rpc_uri", 162 "block_explorer_uri_template", 163 "native_currency", 164 "transaction_service", 165 "vpc_transaction_service", 166 "theme", 167 "gas_price", 168 "ens_registry_address", 169 "recommended_master_copy_version", 170 "disabled_wallets", 171 "features", 172 ] 173 174 @staticmethod 175 @swagger_serializer_method(serializer_or_field=CurrencySerializer) # type: ignore[misc] 176 def get_native_currency(obj: Chain) -> ReturnDict: 177 return CurrencySerializer(obj).data 178 179 @staticmethod 180 @swagger_serializer_method(serializer_or_field=ThemeSerializer) # type: ignore[misc] 181 def get_theme(obj: Chain) -> ReturnDict: 182 return ThemeSerializer(obj).data 183 184 @staticmethod 185 @swagger_serializer_method(serializer_or_field=BaseRpcUriSerializer) # type: ignore[misc] 186 def get_safe_apps_rpc_uri(obj: Chain) -> ReturnDict: 187 return SafeAppsRpcUriSerializer(obj).data 188 189 @staticmethod 190 @swagger_serializer_method(serializer_or_field=BaseRpcUriSerializer) # type: ignore[misc] 191 def get_rpc_uri(obj: Chain) -> ReturnDict: 192 return RpcUriSerializer(obj).data 193 194 @staticmethod 195 @swagger_serializer_method(serializer_or_field=BaseRpcUriSerializer) # type: ignore[misc] 196 def get_public_rpc_uri(obj: Chain) -> ReturnDict: 197 return PublicRpcUriSerializer(obj).data 198 199 @staticmethod 200 @swagger_serializer_method(serializer_or_field=BlockExplorerUriTemplateSerializer) # 
type: ignore[misc] 201 def get_block_explorer_uri_template(obj: Chain) -> ReturnDict: 202 return BlockExplorerUriTemplateSerializer(obj).data 203 204 @swagger_serializer_method(serializer_or_field=GasPriceSerializer) # type: ignore[misc] 205 def get_gas_price(self, instance) -> ReturnDict: # type: ignore[no-untyped-def] 206 ranked_gas_prices = instance.gasprice_set.all().order_by("rank") 207 return GasPriceSerializer(ranked_gas_prices, many=True).data 208 209 @swagger_serializer_method(serializer_or_field=WalletSerializer) # type: ignore[misc] 210 def get_disabled_wallets(self, instance) -> ReturnDict: # type: ignore[no-untyped-def] 211 disabled_wallets = instance.get_disabled_wallets().order_by("key") 212 return WalletSerializer(disabled_wallets, many=True).data 213 214 @swagger_serializer_method(serializer_or_field=FeatureSerializer) # type: ignore[misc] 215 def get_features(self, instance) -> ReturnDict: # type: ignore[no-untyped-def] 216 enabled_features = instance.feature_set.all().order_by("key") 217 return FeatureSerializer(enabled_features, many=True).data 218 [end of src/chains/serializers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/chains/serializers.py b/src/chains/serializers.py --- a/src/chains/serializers.py +++ b/src/chains/serializers.py @@ -171,10 +171,11 @@ "features", ] - @staticmethod @swagger_serializer_method(serializer_or_field=CurrencySerializer) # type: ignore[misc] - def get_native_currency(obj: Chain) -> ReturnDict: - return CurrencySerializer(obj).data + def get_native_currency(self, obj: Chain) -> ReturnDict: + return CurrencySerializer( + obj, context={"request": self.context["request"]} + ).data @staticmethod @swagger_serializer_method(serializer_or_field=ThemeSerializer) # type: ignore[misc]
{"golden_diff": "diff --git a/src/chains/serializers.py b/src/chains/serializers.py\n--- a/src/chains/serializers.py\n+++ b/src/chains/serializers.py\n@@ -171,10 +171,11 @@\n \"features\",\n ]\n \n- @staticmethod\n @swagger_serializer_method(serializer_or_field=CurrencySerializer) # type: ignore[misc]\n- def get_native_currency(obj: Chain) -> ReturnDict:\n- return CurrencySerializer(obj).data\n+ def get_native_currency(self, obj: Chain) -> ReturnDict:\n+ return CurrencySerializer(\n+ obj, context={\"request\": self.context[\"request\"]}\n+ ).data\n \n @staticmethod\n @swagger_serializer_method(serializer_or_field=ThemeSerializer) # type: ignore[misc]\n", "issue": "`/api/v1/chains` returns unexpected nativeCurrency.logoUri path\n**Describe the bug**\r\nI deployed a project locally using the code from `https://github.com/safe-global/safe-infrastructure`. However, after adding a configuration for a chain in cfg, the `/v1/chains` endpoint of cgw started returning a 500 error. After enabling debug logs, I found that cgw returned the following error:\r\n```json\r\n{\r\n \"build_number\": \"edc276d\",\r\n \"level\": \"error\",\r\n \"message\": {\r\n \"chain_id\": null,\r\n \"client_ip\": \"172.70.223.68\",\r\n \"detail\": \"[\\n {\\n \\\"validation\\\": \\\"url\\\",\\n \\\"code\\\": \\\"invalid_string\\\",\\n \\\"message\\\": \\\"Invalid url\\\",\\n \\\"path\\\": [\\n \\\"nativeCurrency\\\",\\n \\\"logoUri\\\"\\n ]\\n }\\n]\",\r\n \"method\": \"GET\",\r\n \"path\": \"/v1/chains\",\r\n \"response_time_ms\": 107.19920100085437,\r\n \"route\": \"/v1/chains\",\r\n \"safe_app_user_agent\": null,\r\n \"status_code\": 500\r\n },\r\n \"request_id\": \"1e37d27b-02b6-468a-a290-b6e06f7bc911\",\r\n \"timestamp\": \"2024-03-29T07:54:29.220Z\",\r\n \"version\": \"v1.31.0\"\r\n}\r\n````\r\nAfter investigating further, I found that the `nativeCurrency.logoUri` in the response data from cfg is not an absolute path:\r\n\r\n```json\r\n{\r\n \"count\": 1,\r\n \"next\": null,\r\n \"previous\": null,\r\n \"results\": [\r\n {\r\n ...\r\n \"nativeCurrency\": {\r\n \"name\": \"ETH\",\r\n \"symbol\": \"ETH\",\r\n \"decimals\": 18,\r\n \"logoUri\": \"/media/chains/100/currency_logo_3fdtfcf.png\"\r\n },\r\n ...\r\n ]\r\n}\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n - Clone the code from `https://github.com/safe-global/safe-infrastructure`\r\n - Modify the MEDIA_URL in cfg.env to `/media/`\r\n**Expected behavior**\r\nThe `nativeCurrency.logoUri` in the response data from cfg should be `https://config.xxx.xyz/media/chains/100/currency_logo_3fdtfcf.png` instead of `/media/chains/100/currency_logo_3fdtfcf.png`.\r\n\r\n**Environment (please complete the following information):**\r\n - Version of safe-config-service: latest(possibly [v2.75.0])\r\n \r\n**Additional context**\r\nI made the following edits to the `src/chains/serializers.py` file\r\nfrom\r\n```python\r\n @staticmethod\r\n @swagger_serializer_method(serializer_or_field=CurrencySerializer) # type: ignore[misc]\r\n def get_native_currency(obj: Chain) -> ReturnDict:\r\n return CurrencySerializer(obj).data\r\n```\r\nto\r\n```python\r\n @swagger_serializer_method(serializer_or_field=CurrencySerializer) # type: ignore[misc]\r\n def get_native_currency(self, obj: Chain) -> ReturnDict:\r\n request = self.context.get('request')\r\n logo_uri = obj.currency_logo_uri\r\n dict = CurrencySerializer(obj).data\r\n dict.update({\"logo_uri\":request.build_absolute_uri(logo_uri)})\r\n return dict\r\n```\r\nAfter applying the temporary workaround, cfg is 
able to return the expected data. However, since I'm not familiar with Python and Django, this was just a temporary solution on my part. I hope that you can find a better way to fix this issue.\n", "before_files": [{"content": "from abc import abstractmethod\n\nfrom drf_yasg.utils import swagger_serializer_method\nfrom gnosis.eth.django.serializers import EthereumAddressField\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.utils.serializer_helpers import ReturnDict\n\nfrom .models import Chain, Feature, GasPrice, Wallet\n\n\nclass GasPriceOracleSerializer(serializers.Serializer[GasPrice]):\n type = serializers.ReadOnlyField(default=\"oracle\")\n uri = serializers.URLField(source=\"oracle_uri\")\n gas_parameter = serializers.CharField(source=\"oracle_parameter\")\n gwei_factor = serializers.DecimalField(max_digits=19, decimal_places=9)\n\n\nclass GasPriceFixedSerializer(serializers.Serializer[GasPrice]):\n type = serializers.ReadOnlyField(default=\"fixed\")\n wei_value = serializers.CharField(source=\"fixed_wei_value\")\n\n\nclass GasPriceFixed1559Serializer(serializers.Serializer[GasPrice]):\n type = serializers.ReadOnlyField(default=\"fixed1559\")\n max_fee_per_gas = serializers.CharField()\n max_priority_fee_per_gas = serializers.CharField()\n\n\nclass GasPriceSerializer(serializers.Serializer[GasPrice]):\n def to_representation(self, instance: GasPrice) -> ReturnDict:\n if (\n instance.oracle_uri\n and instance.fixed_wei_value is None\n and instance.max_fee_per_gas is None\n and instance.max_priority_fee_per_gas is None\n ):\n return GasPriceOracleSerializer(instance).data\n elif (\n instance.fixed_wei_value\n and instance.oracle_uri is None\n and instance.max_fee_per_gas is None\n and instance.max_priority_fee_per_gas is None\n ):\n return GasPriceFixedSerializer(instance).data\n elif (\n instance.max_fee_per_gas\n and instance.max_priority_fee_per_gas\n and instance.oracle_uri is None\n and instance.fixed_wei_value is None\n ):\n return GasPriceFixed1559Serializer(instance).data\n else:\n raise APIException(\n f\"The gas price oracle or a fixed gas price was not provided for chain {instance.chain}\"\n )\n\n\nclass ThemeSerializer(serializers.Serializer[Chain]):\n text_color = serializers.CharField(source=\"theme_text_color\")\n background_color = serializers.CharField(source=\"theme_background_color\")\n\n\nclass CurrencySerializer(serializers.Serializer[Chain]):\n name = serializers.CharField(source=\"currency_name\")\n symbol = serializers.CharField(source=\"currency_symbol\")\n decimals = serializers.IntegerField(source=\"currency_decimals\")\n logo_uri = serializers.ImageField(use_url=True, source=\"currency_logo_uri\")\n\n\nclass BaseRpcUriSerializer(serializers.Serializer[Chain]):\n authentication = serializers.SerializerMethodField()\n value = serializers.SerializerMethodField(method_name=\"get_rpc_value\")\n\n @abstractmethod\n def get_authentication(self, obj: Chain) -> str: # pragma: no cover\n pass\n\n @abstractmethod\n def get_rpc_value(self, obj: Chain) -> str: # pragma: no cover\n pass\n\n\nclass RpcUriSerializer(BaseRpcUriSerializer):\n def get_authentication(self, obj: Chain) -> str:\n return obj.rpc_authentication\n\n def get_rpc_value(self, obj: Chain) -> str:\n return obj.rpc_uri\n\n\nclass SafeAppsRpcUriSerializer(BaseRpcUriSerializer):\n def get_authentication(self, obj: Chain) -> str:\n return obj.safe_apps_rpc_authentication\n\n def get_rpc_value(self, obj: Chain) -> str:\n return 
obj.safe_apps_rpc_uri\n\n\nclass PublicRpcUriSerializer(BaseRpcUriSerializer):\n def get_authentication(self, obj: Chain) -> str:\n return obj.public_rpc_authentication\n\n def get_rpc_value(self, obj: Chain) -> str:\n return obj.public_rpc_uri\n\n\nclass BlockExplorerUriTemplateSerializer(serializers.Serializer[Chain]):\n address = serializers.URLField(source=\"block_explorer_uri_address_template\")\n tx_hash = serializers.URLField(source=\"block_explorer_uri_tx_hash_template\")\n api = serializers.URLField(source=\"block_explorer_uri_api_template\")\n\n\nclass FeatureSerializer(serializers.Serializer[Feature]):\n class Meta:\n ref_name = \"chains.serializers.FeatureSerializer\"\n\n @staticmethod\n def to_representation(instance: Feature) -> str:\n return instance.key\n\n\nclass WalletSerializer(serializers.Serializer[Wallet]):\n @staticmethod\n def to_representation(instance: Wallet) -> str:\n return instance.key\n\n\nclass ChainSerializer(serializers.ModelSerializer[Chain]):\n chain_id = serializers.CharField(source=\"id\")\n chain_name = serializers.CharField(source=\"name\")\n short_name = serializers.CharField()\n chain_logo_uri = serializers.ImageField(use_url=True)\n rpc_uri = serializers.SerializerMethodField()\n safe_apps_rpc_uri = serializers.SerializerMethodField()\n public_rpc_uri = serializers.SerializerMethodField()\n block_explorer_uri_template = serializers.SerializerMethodField()\n native_currency = serializers.SerializerMethodField()\n transaction_service = serializers.URLField(\n source=\"transaction_service_uri\", default=None\n )\n vpc_transaction_service = serializers.URLField(source=\"vpc_transaction_service_uri\")\n theme = serializers.SerializerMethodField()\n gas_price = serializers.SerializerMethodField()\n ens_registry_address = EthereumAddressField()\n disabled_wallets = serializers.SerializerMethodField()\n features = serializers.SerializerMethodField()\n\n class Meta:\n model = Chain\n fields = [\n \"chain_id\",\n \"chain_name\",\n \"short_name\",\n \"description\",\n \"chain_logo_uri\",\n \"l2\",\n \"is_testnet\",\n \"rpc_uri\",\n \"safe_apps_rpc_uri\",\n \"public_rpc_uri\",\n \"block_explorer_uri_template\",\n \"native_currency\",\n \"transaction_service\",\n \"vpc_transaction_service\",\n \"theme\",\n \"gas_price\",\n \"ens_registry_address\",\n \"recommended_master_copy_version\",\n \"disabled_wallets\",\n \"features\",\n ]\n\n @staticmethod\n @swagger_serializer_method(serializer_or_field=CurrencySerializer) # type: ignore[misc]\n def get_native_currency(obj: Chain) -> ReturnDict:\n return CurrencySerializer(obj).data\n\n @staticmethod\n @swagger_serializer_method(serializer_or_field=ThemeSerializer) # type: ignore[misc]\n def get_theme(obj: Chain) -> ReturnDict:\n return ThemeSerializer(obj).data\n\n @staticmethod\n @swagger_serializer_method(serializer_or_field=BaseRpcUriSerializer) # type: ignore[misc]\n def get_safe_apps_rpc_uri(obj: Chain) -> ReturnDict:\n return SafeAppsRpcUriSerializer(obj).data\n\n @staticmethod\n @swagger_serializer_method(serializer_or_field=BaseRpcUriSerializer) # type: ignore[misc]\n def get_rpc_uri(obj: Chain) -> ReturnDict:\n return RpcUriSerializer(obj).data\n\n @staticmethod\n @swagger_serializer_method(serializer_or_field=BaseRpcUriSerializer) # type: ignore[misc]\n def get_public_rpc_uri(obj: Chain) -> ReturnDict:\n return PublicRpcUriSerializer(obj).data\n\n @staticmethod\n @swagger_serializer_method(serializer_or_field=BlockExplorerUriTemplateSerializer) # type: ignore[misc]\n def get_block_explorer_uri_template(obj: 
Chain) -> ReturnDict:\n return BlockExplorerUriTemplateSerializer(obj).data\n\n @swagger_serializer_method(serializer_or_field=GasPriceSerializer) # type: ignore[misc]\n def get_gas_price(self, instance) -> ReturnDict: # type: ignore[no-untyped-def]\n ranked_gas_prices = instance.gasprice_set.all().order_by(\"rank\")\n return GasPriceSerializer(ranked_gas_prices, many=True).data\n\n @swagger_serializer_method(serializer_or_field=WalletSerializer) # type: ignore[misc]\n def get_disabled_wallets(self, instance) -> ReturnDict: # type: ignore[no-untyped-def]\n disabled_wallets = instance.get_disabled_wallets().order_by(\"key\")\n return WalletSerializer(disabled_wallets, many=True).data\n\n @swagger_serializer_method(serializer_or_field=FeatureSerializer) # type: ignore[misc]\n def get_features(self, instance) -> ReturnDict: # type: ignore[no-untyped-def]\n enabled_features = instance.feature_set.all().order_by(\"key\")\n return FeatureSerializer(enabled_features, many=True).data\n", "path": "src/chains/serializers.py"}]}
3,667
175
gh_patches_debug_35673
rasdani/github-patches
git_diff
amundsen-io__amundsen-1573
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug Report - Postegres / Redshift Extractors have inconsistent behavior <!--- Provide a general summary of the issue in the Title above --> <!--- Look through existing open and closed issues to see if someone has reported the issue before --> ## Expected Behavior ```python # For RedshiftMetadataExtractor redshift_metadata_extractor_config = ConfigFactory.from_dict({ 'extractor.redshift_metadata.where_clause_suffix': 'schema <> "foo"' }) ``` ## Current Behavior ```python # For RedshiftMetadataExtractor redshift_metadata_extractor_config = ConfigFactory.from_dict({ 'extractor.redshift_metadata.where_clause_suffix': 'where schema <> "foo"' }) ``` ## Possible Solution Update redshift extractor to properly build suffix ## Screenshots (if appropriate) ## Context Both the `PostgresMetadataExtractor` and the `RedshiftMetadataExtractor` take the config `extractor.redshift_metadata.where_clause_suffix` as they are both based off of `BasePostgresMetadataExtractor`. However they require slightly different different sql partials, which is unexpected: The `where_clause_suffix` for `RedshiftMetadataExtractor` takes a partial sql statement **without** a `WHERE`. The `where_clause_suffix` for `PostgresMetadataExtractor` take a partial sql statement **with** a `WHERE`. The name `where_clause_suffix` implies that its a statement appended to a where clause and should _not_ (IMHO) require a `WHERE` statement. <!--- How has this issue affected you? --> <!--- Providing context helps us come up with a solution that is most useful in the real world --> ## Your Environment <!--- Include as many relevant details about the environment you experienced the bug in --> * Amunsen version used: amundsen-databuilder 6.3.1 * Data warehouse stores: Neo4j * Deployment (k8s or native): terraform * Link to your fork or repository: </issue> <code> [start of databuilder/databuilder/extractor/redshift_metadata_extractor.py] 1 # Copyright Contributors to the Amundsen project. 2 # SPDX-License-Identifier: Apache-2.0 3 4 from typing import ( # noqa: F401 5 Any, Dict, Iterator, Union, 6 ) 7 8 from pyhocon import ConfigFactory, ConfigTree # noqa: F401 9 10 from databuilder.extractor.base_postgres_metadata_extractor import BasePostgresMetadataExtractor 11 12 13 class RedshiftMetadataExtractor(BasePostgresMetadataExtractor): 14 """ 15 Extracts Redshift table and column metadata from underlying meta store database using SQLAlchemyExtractor 16 17 This differs from the PostgresMetadataExtractor because in order to support Redshift's late binding views, 18 we need to join the INFORMATION_SCHEMA data against the function PG_GET_LATE_BINDING_VIEW_COLS(). 
19 """ 20 21 def get_sql_statement(self, use_catalog_as_cluster_name: bool, where_clause_suffix: str) -> str: 22 if use_catalog_as_cluster_name: 23 cluster_source = "CURRENT_DATABASE()" 24 else: 25 cluster_source = f"'{self._cluster}'" 26 27 return """ 28 SELECT 29 * 30 FROM ( 31 SELECT 32 {cluster_source} as cluster, 33 c.table_schema as schema, 34 c.table_name as name, 35 pgtd.description as description, 36 c.column_name as col_name, 37 c.data_type as col_type, 38 pgcd.description as col_description, 39 ordinal_position as col_sort_order 40 FROM INFORMATION_SCHEMA.COLUMNS c 41 INNER JOIN 42 pg_catalog.pg_statio_all_tables as st on c.table_schema=st.schemaname and c.table_name=st.relname 43 LEFT JOIN 44 pg_catalog.pg_description pgcd on pgcd.objoid=st.relid and pgcd.objsubid=c.ordinal_position 45 LEFT JOIN 46 pg_catalog.pg_description pgtd on pgtd.objoid=st.relid and pgtd.objsubid=0 47 48 UNION 49 50 SELECT 51 {cluster_source} as cluster, 52 view_schema as schema, 53 view_name as name, 54 NULL as description, 55 column_name as col_name, 56 data_type as col_type, 57 NULL as col_description, 58 ordinal_position as col_sort_order 59 FROM 60 PG_GET_LATE_BINDING_VIEW_COLS() 61 COLS(view_schema NAME, view_name NAME, column_name NAME, data_type VARCHAR, ordinal_position INT) 62 63 UNION 64 65 SELECT 66 {cluster_source} AS cluster, 67 schemaname AS schema, 68 tablename AS name, 69 NULL AS description, 70 columnname AS col_name, 71 external_type AS col_type, 72 NULL AS col_description, 73 columnnum AS col_sort_order 74 FROM svv_external_columns 75 ) 76 77 {where_clause_suffix} 78 ORDER by cluster, schema, name, col_sort_order ; 79 """.format( 80 cluster_source=cluster_source, 81 where_clause_suffix=where_clause_suffix, 82 ) 83 84 def get_scope(self) -> str: 85 return 'extractor.redshift_metadata' 86 [end of databuilder/databuilder/extractor/redshift_metadata_extractor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/databuilder/databuilder/extractor/redshift_metadata_extractor.py b/databuilder/databuilder/extractor/redshift_metadata_extractor.py --- a/databuilder/databuilder/extractor/redshift_metadata_extractor.py +++ b/databuilder/databuilder/extractor/redshift_metadata_extractor.py @@ -1,6 +1,7 @@ # Copyright Contributors to the Amundsen project. # SPDX-License-Identifier: Apache-2.0 +import logging from typing import ( # noqa: F401 Any, Dict, Iterator, Union, ) @@ -9,11 +10,14 @@ from databuilder.extractor.base_postgres_metadata_extractor import BasePostgresMetadataExtractor +LOGGER = logging.getLogger(__name__) + class RedshiftMetadataExtractor(BasePostgresMetadataExtractor): """ Extracts Redshift table and column metadata from underlying meta store database using SQLAlchemyExtractor + This differs from the PostgresMetadataExtractor because in order to support Redshift's late binding views, we need to join the INFORMATION_SCHEMA data against the function PG_GET_LATE_BINDING_VIEW_COLS(). """ @@ -24,6 +28,15 @@ else: cluster_source = f"'{self._cluster}'" + if where_clause_suffix: + if where_clause_suffix.lower().startswith("where"): + LOGGER.warning("you no longer need to begin with 'where' in your suffix") + where_clause = where_clause_suffix + else: + where_clause = f"where {where_clause_suffix}" + else: + where_clause = "" + return """ SELECT * @@ -74,11 +87,11 @@ FROM svv_external_columns ) - {where_clause_suffix} + {where_clause} ORDER by cluster, schema, name, col_sort_order ; """.format( cluster_source=cluster_source, - where_clause_suffix=where_clause_suffix, + where_clause=where_clause, ) def get_scope(self) -> str:
{"golden_diff": "diff --git a/databuilder/databuilder/extractor/redshift_metadata_extractor.py b/databuilder/databuilder/extractor/redshift_metadata_extractor.py\n--- a/databuilder/databuilder/extractor/redshift_metadata_extractor.py\n+++ b/databuilder/databuilder/extractor/redshift_metadata_extractor.py\n@@ -1,6 +1,7 @@\n # Copyright Contributors to the Amundsen project.\n # SPDX-License-Identifier: Apache-2.0\n \n+import logging\n from typing import ( # noqa: F401\n Any, Dict, Iterator, Union,\n )\n@@ -9,11 +10,14 @@\n \n from databuilder.extractor.base_postgres_metadata_extractor import BasePostgresMetadataExtractor\n \n+LOGGER = logging.getLogger(__name__)\n+\n \n class RedshiftMetadataExtractor(BasePostgresMetadataExtractor):\n \"\"\"\n Extracts Redshift table and column metadata from underlying meta store database using SQLAlchemyExtractor\n \n+\n This differs from the PostgresMetadataExtractor because in order to support Redshift's late binding views,\n we need to join the INFORMATION_SCHEMA data against the function PG_GET_LATE_BINDING_VIEW_COLS().\n \"\"\"\n@@ -24,6 +28,15 @@\n else:\n cluster_source = f\"'{self._cluster}'\"\n \n+ if where_clause_suffix:\n+ if where_clause_suffix.lower().startswith(\"where\"):\n+ LOGGER.warning(\"you no longer need to begin with 'where' in your suffix\")\n+ where_clause = where_clause_suffix\n+ else:\n+ where_clause = f\"where {where_clause_suffix}\"\n+ else:\n+ where_clause = \"\"\n+\n return \"\"\"\n SELECT\n *\n@@ -74,11 +87,11 @@\n FROM svv_external_columns\n )\n \n- {where_clause_suffix}\n+ {where_clause}\n ORDER by cluster, schema, name, col_sort_order ;\n \"\"\".format(\n cluster_source=cluster_source,\n- where_clause_suffix=where_clause_suffix,\n+ where_clause=where_clause,\n )\n \n def get_scope(self) -> str:\n", "issue": "Bug Report - Postegres / Redshift Extractors have inconsistent behavior\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- Look through existing open and closed issues to see if someone has reported the issue before -->\r\n\r\n## Expected Behavior\r\n\r\n```python\r\n# For RedshiftMetadataExtractor\r\nredshift_metadata_extractor_config = ConfigFactory.from_dict({\r\n 'extractor.redshift_metadata.where_clause_suffix': 'schema <> \"foo\"'\r\n})\r\n```\r\n\r\n## Current Behavior\r\n\r\n```python\r\n# For RedshiftMetadataExtractor\r\nredshift_metadata_extractor_config = ConfigFactory.from_dict({\r\n 'extractor.redshift_metadata.where_clause_suffix': 'where schema <> \"foo\"'\r\n})\r\n```\r\n\r\n## Possible Solution\r\n\r\nUpdate redshift extractor to properly build suffix\r\n\r\n## Screenshots (if appropriate)\r\n\r\n## Context\r\n\r\nBoth the `PostgresMetadataExtractor` and the `RedshiftMetadataExtractor` take the config `extractor.redshift_metadata.where_clause_suffix` as they are both based off of `BasePostgresMetadataExtractor`.\r\n\r\nHowever they require slightly different different sql partials, which is unexpected: \r\n\r\nThe `where_clause_suffix` for `RedshiftMetadataExtractor` takes a partial sql statement **without** a `WHERE`.\r\nThe `where_clause_suffix` for `PostgresMetadataExtractor` take a partial sql statement **with** a `WHERE`.\r\n\r\nThe name `where_clause_suffix` implies that its a statement appended to a where clause and should _not_ (IMHO) require a `WHERE` statement.\r\n\r\n\r\n<!--- How has this issue affected you? 
-->\r\n<!--- Providing context helps us come up with a solution that is most useful in the real world -->\r\n\r\n## Your Environment\r\n<!--- Include as many relevant details about the environment you experienced the bug in -->\r\n* Amunsen version used: amundsen-databuilder 6.3.1\r\n* Data warehouse stores: Neo4j\r\n* Deployment (k8s or native): terraform\r\n* Link to your fork or repository:\n", "before_files": [{"content": "# Copyright Contributors to the Amundsen project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import ( # noqa: F401\n Any, Dict, Iterator, Union,\n)\n\nfrom pyhocon import ConfigFactory, ConfigTree # noqa: F401\n\nfrom databuilder.extractor.base_postgres_metadata_extractor import BasePostgresMetadataExtractor\n\n\nclass RedshiftMetadataExtractor(BasePostgresMetadataExtractor):\n \"\"\"\n Extracts Redshift table and column metadata from underlying meta store database using SQLAlchemyExtractor\n\n This differs from the PostgresMetadataExtractor because in order to support Redshift's late binding views,\n we need to join the INFORMATION_SCHEMA data against the function PG_GET_LATE_BINDING_VIEW_COLS().\n \"\"\"\n\n def get_sql_statement(self, use_catalog_as_cluster_name: bool, where_clause_suffix: str) -> str:\n if use_catalog_as_cluster_name:\n cluster_source = \"CURRENT_DATABASE()\"\n else:\n cluster_source = f\"'{self._cluster}'\"\n\n return \"\"\"\n SELECT\n *\n FROM (\n SELECT\n {cluster_source} as cluster,\n c.table_schema as schema,\n c.table_name as name,\n pgtd.description as description,\n c.column_name as col_name,\n c.data_type as col_type,\n pgcd.description as col_description,\n ordinal_position as col_sort_order\n FROM INFORMATION_SCHEMA.COLUMNS c\n INNER JOIN\n pg_catalog.pg_statio_all_tables as st on c.table_schema=st.schemaname and c.table_name=st.relname\n LEFT JOIN\n pg_catalog.pg_description pgcd on pgcd.objoid=st.relid and pgcd.objsubid=c.ordinal_position\n LEFT JOIN\n pg_catalog.pg_description pgtd on pgtd.objoid=st.relid and pgtd.objsubid=0\n\n UNION\n\n SELECT\n {cluster_source} as cluster,\n view_schema as schema,\n view_name as name,\n NULL as description,\n column_name as col_name,\n data_type as col_type,\n NULL as col_description,\n ordinal_position as col_sort_order\n FROM\n PG_GET_LATE_BINDING_VIEW_COLS()\n COLS(view_schema NAME, view_name NAME, column_name NAME, data_type VARCHAR, ordinal_position INT)\n\n UNION\n\n SELECT\n {cluster_source} AS cluster,\n schemaname AS schema,\n tablename AS name,\n NULL AS description,\n columnname AS col_name,\n external_type AS col_type,\n NULL AS col_description,\n columnnum AS col_sort_order\n FROM svv_external_columns\n )\n\n {where_clause_suffix}\n ORDER by cluster, schema, name, col_sort_order ;\n \"\"\".format(\n cluster_source=cluster_source,\n where_clause_suffix=where_clause_suffix,\n )\n\n def get_scope(self) -> str:\n return 'extractor.redshift_metadata'\n", "path": "databuilder/databuilder/extractor/redshift_metadata_extractor.py"}]}
1,742
445
gh_patches_debug_2371
rasdani/github-patches
git_diff
Kinto__kinto-1040
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Kinto Admin version not shown in footer ![screenshot from 2017-01-24 17-01-08](https://cloud.githubusercontent.com/assets/546692/22255110/b87c08e2-e256-11e6-9880-b85014bb9f0a.png) Looks like the effects of this are lost when packaged as a kinto plugin: https://github.com/Kinto/kinto-admin/commit/8b184b041121ed4affddfbc2ce98ce658226ee34 </issue> <code> [start of kinto/plugins/admin/__init__.py] 1 from pyramid.static import static_view 2 from pyramid.httpexceptions import HTTPTemporaryRedirect 3 4 5 def includeme(config): 6 # Process settings to remove storage wording. 7 8 # Expose capability. 9 config.add_api_capability( 10 "admin", 11 version="1.8.0", 12 description="Serves the admin console.", 13 url="https://github.com/Kinto/kinto-admin/", 14 ) 15 16 build_dir = static_view('kinto.plugins.admin:build', use_subpath=True) 17 config.add_route('catchall_static', '/admin/*subpath') 18 config.add_view(build_dir, route_name="catchall_static") 19 20 # Setup redirect without trailing slash. 21 def admin_redirect_view(request): 22 raise HTTPTemporaryRedirect(request.path + '/') 23 24 config.add_route('admin_redirect', '/admin') 25 config.add_view(admin_redirect_view, route_name="admin_redirect") 26 [end of kinto/plugins/admin/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/plugins/admin/__init__.py b/kinto/plugins/admin/__init__.py --- a/kinto/plugins/admin/__init__.py +++ b/kinto/plugins/admin/__init__.py @@ -8,7 +8,7 @@ # Expose capability. config.add_api_capability( "admin", - version="1.8.0", + version="1.8.1", description="Serves the admin console.", url="https://github.com/Kinto/kinto-admin/", )
{"golden_diff": "diff --git a/kinto/plugins/admin/__init__.py b/kinto/plugins/admin/__init__.py\n--- a/kinto/plugins/admin/__init__.py\n+++ b/kinto/plugins/admin/__init__.py\n@@ -8,7 +8,7 @@\n # Expose capability.\n config.add_api_capability(\n \"admin\",\n- version=\"1.8.0\",\n+ version=\"1.8.1\",\n description=\"Serves the admin console.\",\n url=\"https://github.com/Kinto/kinto-admin/\",\n )\n", "issue": "Kinto Admin version not shown in footer\n\r\n![screenshot from 2017-01-24 17-01-08](https://cloud.githubusercontent.com/assets/546692/22255110/b87c08e2-e256-11e6-9880-b85014bb9f0a.png)\r\n\r\nLooks like the effects of this are lost when packaged as a kinto plugin:\r\n\r\nhttps://github.com/Kinto/kinto-admin/commit/8b184b041121ed4affddfbc2ce98ce658226ee34\n", "before_files": [{"content": "from pyramid.static import static_view\nfrom pyramid.httpexceptions import HTTPTemporaryRedirect\n\n\ndef includeme(config):\n # Process settings to remove storage wording.\n\n # Expose capability.\n config.add_api_capability(\n \"admin\",\n version=\"1.8.0\",\n description=\"Serves the admin console.\",\n url=\"https://github.com/Kinto/kinto-admin/\",\n )\n\n build_dir = static_view('kinto.plugins.admin:build', use_subpath=True)\n config.add_route('catchall_static', '/admin/*subpath')\n config.add_view(build_dir, route_name=\"catchall_static\")\n\n # Setup redirect without trailing slash.\n def admin_redirect_view(request):\n raise HTTPTemporaryRedirect(request.path + '/')\n\n config.add_route('admin_redirect', '/admin')\n config.add_view(admin_redirect_view, route_name=\"admin_redirect\")\n", "path": "kinto/plugins/admin/__init__.py"}]}
922
114
gh_patches_debug_174
rasdani/github-patches
git_diff
pulp__pulpcore-3469
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Expose "get_url" via the plugin interface </issue> <code> [start of pulpcore/plugin/util.py] 1 from pulpcore.app.role_util import ( # noqa 2 assign_role, 3 get_groups_with_perms, 4 get_groups_with_perms_attached_perms, 5 get_groups_with_perms_attached_roles, 6 get_objects_for_group, 7 get_objects_for_user, 8 get_perms_for_model, 9 get_users_with_perms, 10 get_users_with_perms_attached_perms, 11 get_users_with_perms_attached_roles, 12 remove_role, 13 ) 14 15 from pulpcore.app.util import get_artifact_url, gpg_verify, verify_signature # noqa 16 [end of pulpcore/plugin/util.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pulpcore/plugin/util.py b/pulpcore/plugin/util.py --- a/pulpcore/plugin/util.py +++ b/pulpcore/plugin/util.py @@ -12,4 +12,4 @@ remove_role, ) -from pulpcore.app.util import get_artifact_url, gpg_verify, verify_signature # noqa +from pulpcore.app.util import get_artifact_url, get_url, gpg_verify, verify_signature # noqa
{"golden_diff": "diff --git a/pulpcore/plugin/util.py b/pulpcore/plugin/util.py\n--- a/pulpcore/plugin/util.py\n+++ b/pulpcore/plugin/util.py\n@@ -12,4 +12,4 @@\n remove_role,\n )\n \n-from pulpcore.app.util import get_artifact_url, gpg_verify, verify_signature # noqa\n+from pulpcore.app.util import get_artifact_url, get_url, gpg_verify, verify_signature # noqa\n", "issue": "Expose \"get_url\" via the plugin interface\n\n", "before_files": [{"content": "from pulpcore.app.role_util import ( # noqa\n assign_role,\n get_groups_with_perms,\n get_groups_with_perms_attached_perms,\n get_groups_with_perms_attached_roles,\n get_objects_for_group,\n get_objects_for_user,\n get_perms_for_model,\n get_users_with_perms,\n get_users_with_perms_attached_perms,\n get_users_with_perms_attached_roles,\n remove_role,\n)\n\nfrom pulpcore.app.util import get_artifact_url, gpg_verify, verify_signature # noqa\n", "path": "pulpcore/plugin/util.py"}]}
673
100
gh_patches_debug_12475
rasdani/github-patches
git_diff
talonhub__community-1040
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "phones that" should DTRT and select the current word if nothing is selected there's really no need for both `phones that` and `phones word`, we can just make `phones that` do the right thing and select the current word if nothing is selected - ie. if `edit.selected_text()` returns the empty string? </issue> <code> [start of core/homophones/homophones.py] 1 import os 2 3 from talon import Context, Module, actions, app, clip, fs, imgui, ui 4 5 ######################################################################## 6 # global settings 7 ######################################################################## 8 9 # a list of homophones where each line is a comma separated list 10 # e.g. where,wear,ware 11 # a suitable one can be found here: 12 # https://github.com/pimentel/homophones 13 cwd = os.path.dirname(os.path.realpath(__file__)) 14 homophones_file = os.path.join(cwd, "homophones.csv") 15 # if quick_replace, then when a word is selected and only one homophone exists, 16 # replace it without bringing up the options 17 quick_replace = True 18 show_help = False 19 ######################################################################## 20 21 ctx = Context() 22 mod = Module() 23 mod.mode("homophones") 24 mod.list("homophones_canonicals", desc="list of words ") 25 26 main_screen = ui.main_screen() 27 28 29 def update_homophones(name, flags): 30 if name != homophones_file: 31 return 32 33 phones = {} 34 canonical_list = [] 35 with open(homophones_file) as f: 36 for line in f: 37 words = line.rstrip().split(",") 38 canonical_list.append(words[0]) 39 merged_words = set(words) 40 for word in words: 41 old_words = phones.get(word.lower(), []) 42 merged_words.update(old_words) 43 merged_words = sorted(merged_words) 44 for word in merged_words: 45 phones[word.lower()] = merged_words 46 47 global all_homophones 48 all_homophones = phones 49 ctx.lists["self.homophones_canonicals"] = canonical_list 50 51 52 update_homophones(homophones_file, None) 53 fs.watch(cwd, update_homophones) 54 active_word_list = None 55 is_selection = False 56 57 58 def close_homophones(): 59 gui.hide() 60 actions.mode.disable("user.homophones") 61 62 63 PHONES_FORMATTERS = [ 64 lambda word: word.capitalize(), 65 lambda word: word.upper(), 66 ] 67 68 69 def find_matching_format_function(word_with_formatting, format_functions): 70 """Finds the formatter function from a list of formatter functions which transforms a word into itself. 71 Returns an identity function if none exists""" 72 for formatter in format_functions: 73 formatted_word = formatter(word_with_formatting) 74 if word_with_formatting == formatted_word: 75 return formatter 76 77 return lambda word: word 78 79 80 def raise_homophones(word_to_find_homophones_for, forced=False, selection=False): 81 global quick_replace 82 global active_word_list 83 global show_help 84 global force_raise 85 global is_selection 86 87 force_raise = forced 88 is_selection = selection 89 90 if is_selection: 91 word_to_find_homophones_for = word_to_find_homophones_for.strip() 92 93 formatter = find_matching_format_function( 94 word_to_find_homophones_for, PHONES_FORMATTERS 95 ) 96 97 word_to_find_homophones_for = word_to_find_homophones_for.lower() 98 99 # We support plurals, but very naively. If we can't find your word but your word ends in an s, presume its plural 100 # and attempt to find the singular, then present the presumed plurals back. This could be improved! 
101 if word_to_find_homophones_for in all_homophones: 102 valid_homophones = all_homophones[word_to_find_homophones_for] 103 elif ( 104 word_to_find_homophones_for[-1] == "s" 105 and word_to_find_homophones_for[:-1] in all_homophones 106 ): 107 valid_homophones = map( 108 lambda w: w + "s", all_homophones[word_to_find_homophones_for[:-1]] 109 ) 110 else: 111 app.notify( 112 "homophones.py", f'"{word_to_find_homophones_for}" not in homophones list' 113 ) 114 return 115 116 # Move current word to end of list to reduce searcher's cognitive load 117 valid_homophones_reordered = list( 118 filter( 119 lambda word_from_list: word_from_list.lower() 120 != word_to_find_homophones_for, 121 valid_homophones, 122 ) 123 ) + [word_to_find_homophones_for] 124 active_word_list = list(map(formatter, valid_homophones_reordered)) 125 126 if ( 127 is_selection 128 and len(active_word_list) == 2 129 and quick_replace 130 and not force_raise 131 ): 132 if word_to_find_homophones_for == active_word_list[0].lower(): 133 new = active_word_list[1] 134 else: 135 new = active_word_list[0] 136 137 clip.set(new) 138 actions.edit.paste() 139 140 return 141 142 actions.mode.enable("user.homophones") 143 show_help = False 144 gui.show() 145 146 147 @imgui.open(x=main_screen.x + main_screen.width / 2.6, y=main_screen.y) 148 def gui(gui: imgui.GUI): 149 global active_word_list 150 if show_help: 151 gui.text("Homephone help - todo") 152 else: 153 gui.text("Select a homophone") 154 gui.line() 155 index = 1 156 for word in active_word_list: 157 if gui.button(f"Choose {index}: {word}"): 158 actions.insert(actions.user.homophones_select(index)) 159 actions.user.homophones_hide() 160 index = index + 1 161 162 if gui.button("Phones hide"): 163 actions.user.homophones_hide() 164 165 166 def show_help_gui(): 167 global show_help 168 show_help = True 169 gui.show() 170 171 172 @mod.capture(rule="{self.homophones_canonicals}") 173 def homophones_canonical(m) -> str: 174 "Returns a single string" 175 return m.homophones_canonicals 176 177 178 @mod.action_class 179 class Actions: 180 def homophones_hide(): 181 """Hides the homophones display""" 182 close_homophones() 183 184 def homophones_show(m: str): 185 """Show the homophones display""" 186 raise_homophones(m, False, False) 187 188 def homophones_show_selection(): 189 """Show the homophones display for the selected text""" 190 raise_homophones(actions.edit.selected_text(), False, True) 191 192 def homophones_force_show(m: str): 193 """Show the homophones display forcibly""" 194 raise_homophones(m, True, False) 195 196 def homophones_force_show_selection(): 197 """Show the homophones display for the selected text forcibly""" 198 raise_homophones(actions.edit.selected_text(), True, True) 199 200 def homophones_select(number: int) -> str: 201 """selects the homophone by number""" 202 if number <= len(active_word_list) and number > 0: 203 return active_word_list[number - 1] 204 205 error = "homophones.py index {} is out of range (1-{})".format( 206 number, len(active_word_list) 207 ) 208 app.notify(error) 209 raise error 210 211 def homophones_get(word: str) -> [str] or None: 212 """Get homophones for the given word""" 213 word = word.lower() 214 if word in all_homophones: 215 return all_homophones[word] 216 return None 217 [end of core/homophones/homophones.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/homophones/homophones.py b/core/homophones/homophones.py --- a/core/homophones/homophones.py +++ b/core/homophones/homophones.py @@ -185,6 +185,15 @@ """Show the homophones display""" raise_homophones(m, False, False) + def homophones_show_auto(): + """Show homophones for selection, or current word if selection is empty.""" + text = actions.edit.selected_text() + if text: + actions.user.homophones_show(text) + else: + actions.edit.select_word() + actions.user.homophones_show_selection() + def homophones_show_selection(): """Show the homophones display for the selected text""" raise_homophones(actions.edit.selected_text(), False, True)
{"golden_diff": "diff --git a/core/homophones/homophones.py b/core/homophones/homophones.py\n--- a/core/homophones/homophones.py\n+++ b/core/homophones/homophones.py\n@@ -185,6 +185,15 @@\n \"\"\"Show the homophones display\"\"\"\n raise_homophones(m, False, False)\n \n+ def homophones_show_auto():\n+ \"\"\"Show homophones for selection, or current word if selection is empty.\"\"\"\n+ text = actions.edit.selected_text()\n+ if text:\n+ actions.user.homophones_show(text)\n+ else:\n+ actions.edit.select_word()\n+ actions.user.homophones_show_selection()\n+\n def homophones_show_selection():\n \"\"\"Show the homophones display for the selected text\"\"\"\n raise_homophones(actions.edit.selected_text(), False, True)\n", "issue": "\"phones that\" should DTRT and select the current word if nothing is selected\nthere's really no need for both `phones that` and `phones word`, we can just make `phones that` do the right thing and select the current word if nothing is selected - ie. if `edit.selected_text()` returns the empty string?\n", "before_files": [{"content": "import os\n\nfrom talon import Context, Module, actions, app, clip, fs, imgui, ui\n\n########################################################################\n# global settings\n########################################################################\n\n# a list of homophones where each line is a comma separated list\n# e.g. where,wear,ware\n# a suitable one can be found here:\n# https://github.com/pimentel/homophones\ncwd = os.path.dirname(os.path.realpath(__file__))\nhomophones_file = os.path.join(cwd, \"homophones.csv\")\n# if quick_replace, then when a word is selected and only one homophone exists,\n# replace it without bringing up the options\nquick_replace = True\nshow_help = False\n########################################################################\n\nctx = Context()\nmod = Module()\nmod.mode(\"homophones\")\nmod.list(\"homophones_canonicals\", desc=\"list of words \")\n\nmain_screen = ui.main_screen()\n\n\ndef update_homophones(name, flags):\n if name != homophones_file:\n return\n\n phones = {}\n canonical_list = []\n with open(homophones_file) as f:\n for line in f:\n words = line.rstrip().split(\",\")\n canonical_list.append(words[0])\n merged_words = set(words)\n for word in words:\n old_words = phones.get(word.lower(), [])\n merged_words.update(old_words)\n merged_words = sorted(merged_words)\n for word in merged_words:\n phones[word.lower()] = merged_words\n\n global all_homophones\n all_homophones = phones\n ctx.lists[\"self.homophones_canonicals\"] = canonical_list\n\n\nupdate_homophones(homophones_file, None)\nfs.watch(cwd, update_homophones)\nactive_word_list = None\nis_selection = False\n\n\ndef close_homophones():\n gui.hide()\n actions.mode.disable(\"user.homophones\")\n\n\nPHONES_FORMATTERS = [\n lambda word: word.capitalize(),\n lambda word: word.upper(),\n]\n\n\ndef find_matching_format_function(word_with_formatting, format_functions):\n \"\"\"Finds the formatter function from a list of formatter functions which transforms a word into itself.\n Returns an identity function if none exists\"\"\"\n for formatter in format_functions:\n formatted_word = formatter(word_with_formatting)\n if word_with_formatting == formatted_word:\n return formatter\n\n return lambda word: word\n\n\ndef raise_homophones(word_to_find_homophones_for, forced=False, selection=False):\n global quick_replace\n global active_word_list\n global show_help\n global force_raise\n global is_selection\n\n force_raise = forced\n is_selection = 
selection\n\n if is_selection:\n word_to_find_homophones_for = word_to_find_homophones_for.strip()\n\n formatter = find_matching_format_function(\n word_to_find_homophones_for, PHONES_FORMATTERS\n )\n\n word_to_find_homophones_for = word_to_find_homophones_for.lower()\n\n # We support plurals, but very naively. If we can't find your word but your word ends in an s, presume its plural\n # and attempt to find the singular, then present the presumed plurals back. This could be improved!\n if word_to_find_homophones_for in all_homophones:\n valid_homophones = all_homophones[word_to_find_homophones_for]\n elif (\n word_to_find_homophones_for[-1] == \"s\"\n and word_to_find_homophones_for[:-1] in all_homophones\n ):\n valid_homophones = map(\n lambda w: w + \"s\", all_homophones[word_to_find_homophones_for[:-1]]\n )\n else:\n app.notify(\n \"homophones.py\", f'\"{word_to_find_homophones_for}\" not in homophones list'\n )\n return\n\n # Move current word to end of list to reduce searcher's cognitive load\n valid_homophones_reordered = list(\n filter(\n lambda word_from_list: word_from_list.lower()\n != word_to_find_homophones_for,\n valid_homophones,\n )\n ) + [word_to_find_homophones_for]\n active_word_list = list(map(formatter, valid_homophones_reordered))\n\n if (\n is_selection\n and len(active_word_list) == 2\n and quick_replace\n and not force_raise\n ):\n if word_to_find_homophones_for == active_word_list[0].lower():\n new = active_word_list[1]\n else:\n new = active_word_list[0]\n\n clip.set(new)\n actions.edit.paste()\n\n return\n\n actions.mode.enable(\"user.homophones\")\n show_help = False\n gui.show()\n\n\[email protected](x=main_screen.x + main_screen.width / 2.6, y=main_screen.y)\ndef gui(gui: imgui.GUI):\n global active_word_list\n if show_help:\n gui.text(\"Homephone help - todo\")\n else:\n gui.text(\"Select a homophone\")\n gui.line()\n index = 1\n for word in active_word_list:\n if gui.button(f\"Choose {index}: {word}\"):\n actions.insert(actions.user.homophones_select(index))\n actions.user.homophones_hide()\n index = index + 1\n\n if gui.button(\"Phones hide\"):\n actions.user.homophones_hide()\n\n\ndef show_help_gui():\n global show_help\n show_help = True\n gui.show()\n\n\[email protected](rule=\"{self.homophones_canonicals}\")\ndef homophones_canonical(m) -> str:\n \"Returns a single string\"\n return m.homophones_canonicals\n\n\[email protected]_class\nclass Actions:\n def homophones_hide():\n \"\"\"Hides the homophones display\"\"\"\n close_homophones()\n\n def homophones_show(m: str):\n \"\"\"Show the homophones display\"\"\"\n raise_homophones(m, False, False)\n\n def homophones_show_selection():\n \"\"\"Show the homophones display for the selected text\"\"\"\n raise_homophones(actions.edit.selected_text(), False, True)\n\n def homophones_force_show(m: str):\n \"\"\"Show the homophones display forcibly\"\"\"\n raise_homophones(m, True, False)\n\n def homophones_force_show_selection():\n \"\"\"Show the homophones display for the selected text forcibly\"\"\"\n raise_homophones(actions.edit.selected_text(), True, True)\n\n def homophones_select(number: int) -> str:\n \"\"\"selects the homophone by number\"\"\"\n if number <= len(active_word_list) and number > 0:\n return active_word_list[number - 1]\n\n error = \"homophones.py index {} is out of range (1-{})\".format(\n number, len(active_word_list)\n )\n app.notify(error)\n raise error\n\n def homophones_get(word: str) -> [str] or None:\n \"\"\"Get homophones for the given word\"\"\"\n word = word.lower()\n if word in 
all_homophones:\n return all_homophones[word]\n return None\n", "path": "core/homophones/homophones.py"}]}
2,706
196
gh_patches_debug_8673
rasdani/github-patches
git_diff
jupyterhub__jupyterhub-4423
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> How do we work with documentation redirects from rediraffe in the repo? [...] to make a decision on what to do with regards to how we work with the redirects in this repo for now. If experience is gained and you start feeling confident on a practice, we try go for that more systematically in other repos long term. I think the options on how to work with the redirects are: 1. Fully manual as now, where we figure out ourselves if we need to add a redirect 2. Mostly manual, where we can go fully manual but also suggest that `make rediraffecheckdiff` can be used to get informed on what redirects we should make. 3. Semi automated, where we suggest using `make rediraffewritediff` to update a file (not overwrite right?) 4. Mostly automated, where we have a github workflow triggered when docs change to inspect if a redirect needs to be setup and otherwise fails to let us know. 5. Almost fully automated, where we also push a commit with the relevant redirects. If I understand this correctly, we need to consider this ourselves no matter what because the tools can't figure it out fully in all cases. That makes me think we shouldn't go for option 5. Maybe option 2-4? No matter what, I'm a big :+1: for going for something you feel like going for! _Originally posted by @consideRatio in https://github.com/jupyterhub/jupyterhub/pull/4331#discussion_r1092331738_ </issue> <code> [start of docs/source/conf.py] 1 # Configuration file for Sphinx to build our documentation to HTML. 2 # 3 # Configuration reference: https://www.sphinx-doc.org/en/master/usage/configuration.html 4 # 5 import contextlib 6 import datetime 7 import io 8 import os 9 import subprocess 10 11 from docutils import nodes 12 from sphinx.directives.other import SphinxDirective 13 14 import jupyterhub 15 from jupyterhub.app import JupyterHub 16 17 # -- Project information ----------------------------------------------------- 18 # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 19 # 20 project = "JupyterHub" 21 author = "Project Jupyter Contributors" 22 copyright = f"{datetime.date.today().year}, {author}" 23 24 25 # -- General Sphinx configuration -------------------------------------------- 26 # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 27 # 28 extensions = [ 29 "sphinx.ext.autodoc", 30 "sphinx.ext.intersphinx", 31 "sphinx.ext.napoleon", 32 "autodoc_traits", 33 "sphinx_copybutton", 34 "sphinx-jsonschema", 35 "sphinxext.opengraph", 36 "sphinxext.rediraffe", 37 "jupyterhub_sphinx_theme", 38 "myst_parser", 39 ] 40 root_doc = "index" 41 source_suffix = [".md"] 42 # default_role let's use use `foo` instead of ``foo`` in rST 43 default_role = "literal" 44 45 46 # -- MyST configuration ------------------------------------------------------ 47 # ref: https://myst-parser.readthedocs.io/en/latest/configuration.html 48 # 49 myst_heading_anchors = 2 50 51 myst_enable_extensions = [ 52 # available extensions: https://myst-parser.readthedocs.io/en/latest/syntax/optional.html 53 "attrs_inline", 54 "colon_fence", 55 "deflist", 56 "fieldlist", 57 "substitution", 58 ] 59 60 myst_substitutions = { 61 # date example: Dev 07, 2022 62 "date": datetime.date.today().strftime("%b %d, %Y").title(), 63 "version": jupyterhub.__version__, 64 } 65 66 67 # -- Custom directives to generate documentation ----------------------------- 68 # ref: 
https://myst-parser.readthedocs.io/en/latest/syntax/roles-and-directives.html 69 # 70 # We define custom directives to help us generate documentation using Python on 71 # demand when referenced from our documentation files. 72 # 73 74 # Create a temp instance of JupyterHub for use by two separate directive classes 75 # to get the output from using the "--generate-config" and "--help-all" CLI 76 # flags respectively. 77 # 78 jupyterhub_app = JupyterHub() 79 80 81 class ConfigDirective(SphinxDirective): 82 """Generate the configuration file output for use in the documentation.""" 83 84 has_content = False 85 required_arguments = 0 86 optional_arguments = 0 87 final_argument_whitespace = False 88 option_spec = {} 89 90 def run(self): 91 # The generated configuration file for this version 92 generated_config = jupyterhub_app.generate_config_file() 93 # post-process output 94 home_dir = os.environ["HOME"] 95 generated_config = generated_config.replace(home_dir, "$HOME", 1) 96 par = nodes.literal_block(text=generated_config) 97 return [par] 98 99 100 class HelpAllDirective(SphinxDirective): 101 """Print the output of jupyterhub help --all for use in the documentation.""" 102 103 has_content = False 104 required_arguments = 0 105 optional_arguments = 0 106 final_argument_whitespace = False 107 option_spec = {} 108 109 def run(self): 110 # The output of the help command for this version 111 buffer = io.StringIO() 112 with contextlib.redirect_stdout(buffer): 113 jupyterhub_app.print_help("--help-all") 114 all_help = buffer.getvalue() 115 # post-process output 116 home_dir = os.environ["HOME"] 117 all_help = all_help.replace(home_dir, "$HOME", 1) 118 par = nodes.literal_block(text=all_help) 119 return [par] 120 121 122 def setup(app): 123 app.add_css_file("custom.css") 124 app.add_directive("jupyterhub-generate-config", ConfigDirective) 125 app.add_directive("jupyterhub-help-all", HelpAllDirective) 126 127 128 # -- Read The Docs ----------------------------------------------------------- 129 # 130 # Since RTD runs sphinx-build directly without running "make html", we run the 131 # pre-requisite steps for "make html" from here if needed. 132 # 133 if os.environ.get("READTHEDOCS"): 134 docs = os.path.dirname(os.path.dirname(__file__)) 135 subprocess.check_call(["make", "metrics", "scopes"], cwd=docs) 136 137 138 # -- Spell checking ---------------------------------------------------------- 139 # ref: https://sphinxcontrib-spelling.readthedocs.io/en/latest/customize.html#configuration-options 140 # 141 # The "sphinxcontrib.spelling" extension is optionally enabled if its available. 
142 # 143 try: 144 import sphinxcontrib.spelling # noqa 145 except ImportError: 146 pass 147 else: 148 extensions.append("sphinxcontrib.spelling") 149 spelling_word_list_filename = "spelling_wordlist.txt" 150 151 152 # -- Options for HTML output ------------------------------------------------- 153 # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 154 # 155 html_logo = "_static/images/logo/logo.png" 156 html_favicon = "_static/images/logo/favicon.ico" 157 html_static_path = ["_static"] 158 159 html_theme = "jupyterhub_sphinx_theme" 160 html_theme_options = { 161 "icon_links": [ 162 { 163 "name": "GitHub", 164 "url": "https://github.com/jupyterhub/jupyterhub", 165 "icon": "fa-brands fa-github", 166 }, 167 ], 168 "use_edit_page_button": True, 169 "navbar_align": "left", 170 } 171 html_context = { 172 "github_user": "jupyterhub", 173 "github_repo": "jupyterhub", 174 "github_version": "main", 175 "doc_path": "docs/source", 176 } 177 178 179 # -- Options for linkcheck builder ------------------------------------------- 180 # ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder 181 # 182 linkcheck_ignore = [ 183 r"(.*)github\.com(.*)#", # javascript based anchors 184 r"(.*)/#%21(.*)/(.*)", # /#!forum/jupyter - encoded anchor edge case 185 r"https://github.com/[^/]*$", # too many github usernames / searches in changelog 186 "https://github.com/jupyterhub/jupyterhub/pull/", # too many PRs in changelog 187 "https://github.com/jupyterhub/jupyterhub/compare/", # too many comparisons in changelog 188 r"https?://(localhost|127.0.0.1).*", # ignore localhost references in auto-links 189 r".*/rest-api.html#.*", # ignore javascript-resolved internal rest-api links 190 r"https://jupyter.chameleoncloud.org", # FIXME: ignore (presumably) short-term SSL issue 191 ] 192 linkcheck_anchors_ignore = [ 193 "/#!", 194 "/#%21", 195 ] 196 197 # -- Intersphinx ------------------------------------------------------------- 198 # ref: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#configuration 199 # 200 intersphinx_mapping = { 201 "python": ("https://docs.python.org/3/", None), 202 "tornado": ("https://www.tornadoweb.org/en/stable/", None), 203 "jupyter-server": ("https://jupyter-server.readthedocs.io/en/stable/", None), 204 } 205 206 # -- Options for the opengraph extension ------------------------------------- 207 # ref: https://github.com/wpilibsuite/sphinxext-opengraph#options 208 # 209 # ogp_site_url is set automatically by RTD 210 ogp_image = "_static/logo.png" 211 ogp_use_first_image = True 212 213 214 # -- Options for the rediraffe extension ------------------------------------- 215 # ref: https://github.com/wpilibsuite/sphinxext-rediraffe#readme 216 # 217 # This extension helps us relocate content without breaking links. If a 218 # document is moved internally, a redirect link should be configured as below to 219 # help us not break links. 220 # 221 # The workflow for adding redirects can be as follows: 222 # 1. Change "rediraffe_branch" below to point to the commit/ branch you 223 # want to base off the changes. 224 # 2. Option 1: run "make rediraffecheckdiff" 225 # a. Analyze the output of this command. 226 # b. Manually add the redirect entries to the "redirects.txt" file. 227 # Option 2: run "make rediraffewritediff" 228 # a. rediraffe will then automatically add the obvious redirects to redirects.txt. 229 # b. Analyze the output of the command for broken links. 230 # c. 
Check the "redirects.txt" file for any files that were moved/ renamed but are not listed. 231 # d. Manually add the redirects that have been mised by the automatic builder to "redirects.txt". 232 # Option 3: Do not use the commands above and, instead, do everything manually - by taking 233 # note of the files you have moved or renamed and adding them to the "redirects.txt" file. 234 # 235 # If you are basing changes off another branch/ commit, always change back 236 # rediraffe_branch to main before pushing your changes upstream. 237 # 238 rediraffe_branch = "main" 239 rediraffe_redirects = "redirects.txt" 240 # rediraffe_redirects = { 241 # "old-file": "new-folder/new-file-name", 242 # } 243 [end of docs/source/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -235,8 +235,12 @@ # If you are basing changes off another branch/ commit, always change back # rediraffe_branch to main before pushing your changes upstream. # -rediraffe_branch = "main" +rediraffe_branch = os.environ.get("REDIRAFFE_BRANCH", "main") rediraffe_redirects = "redirects.txt" + +# allow 80% match for autogenerated redirects +rediraffe_auto_redirect_perc = 80 + # rediraffe_redirects = { # "old-file": "new-folder/new-file-name", # }
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -235,8 +235,12 @@\n # If you are basing changes off another branch/ commit, always change back\n # rediraffe_branch to main before pushing your changes upstream.\n #\n-rediraffe_branch = \"main\"\n+rediraffe_branch = os.environ.get(\"REDIRAFFE_BRANCH\", \"main\")\n rediraffe_redirects = \"redirects.txt\"\n+\n+# allow 80% match for autogenerated redirects\n+rediraffe_auto_redirect_perc = 80\n+\n # rediraffe_redirects = {\n # \"old-file\": \"new-folder/new-file-name\",\n # }\n", "issue": "How do we work with documentation redirects from rediraffe in the repo?\n[...] to make a decision on what to do with regards to how we work with the redirects in this repo for now. If experience is gained and you start feeling confident on a practice, we try go for that more systematically in other repos long term.\r\n\r\nI think the options on how to work with the redirects are:\r\n\r\n1. Fully manual as now, where we figure out ourselves if we need to add a redirect\r\n2. Mostly manual, where we can go fully manual but also suggest that `make rediraffecheckdiff` can be used to get informed on what redirects we should make.\r\n3. Semi automated, where we suggest using `make rediraffewritediff` to update a file (not overwrite right?)\r\n4. Mostly automated, where we have a github workflow triggered when docs change to inspect if a redirect needs to be setup and otherwise fails to let us know.\r\n5. Almost fully automated, where we also push a commit with the relevant redirects.\r\n\r\nIf I understand this correctly, we need to consider this ourselves no matter what because the tools can't figure it out fully in all cases. That makes me think we shouldn't go for option 5. Maybe option 2-4? 
No matter what, I'm a big :+1: for going for something you feel like going for!\r\n\r\n_Originally posted by @consideRatio in https://github.com/jupyterhub/jupyterhub/pull/4331#discussion_r1092331738_\r\n \n", "before_files": [{"content": "# Configuration file for Sphinx to build our documentation to HTML.\n#\n# Configuration reference: https://www.sphinx-doc.org/en/master/usage/configuration.html\n#\nimport contextlib\nimport datetime\nimport io\nimport os\nimport subprocess\n\nfrom docutils import nodes\nfrom sphinx.directives.other import SphinxDirective\n\nimport jupyterhub\nfrom jupyterhub.app import JupyterHub\n\n# -- Project information -----------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n#\nproject = \"JupyterHub\"\nauthor = \"Project Jupyter Contributors\"\ncopyright = f\"{datetime.date.today().year}, {author}\"\n\n\n# -- General Sphinx configuration --------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n#\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"autodoc_traits\",\n \"sphinx_copybutton\",\n \"sphinx-jsonschema\",\n \"sphinxext.opengraph\",\n \"sphinxext.rediraffe\",\n \"jupyterhub_sphinx_theme\",\n \"myst_parser\",\n]\nroot_doc = \"index\"\nsource_suffix = [\".md\"]\n# default_role let's use use `foo` instead of ``foo`` in rST\ndefault_role = \"literal\"\n\n\n# -- MyST configuration ------------------------------------------------------\n# ref: https://myst-parser.readthedocs.io/en/latest/configuration.html\n#\nmyst_heading_anchors = 2\n\nmyst_enable_extensions = [\n # available extensions: https://myst-parser.readthedocs.io/en/latest/syntax/optional.html\n \"attrs_inline\",\n \"colon_fence\",\n \"deflist\",\n \"fieldlist\",\n \"substitution\",\n]\n\nmyst_substitutions = {\n # date example: Dev 07, 2022\n \"date\": datetime.date.today().strftime(\"%b %d, %Y\").title(),\n \"version\": jupyterhub.__version__,\n}\n\n\n# -- Custom directives to generate documentation -----------------------------\n# ref: https://myst-parser.readthedocs.io/en/latest/syntax/roles-and-directives.html\n#\n# We define custom directives to help us generate documentation using Python on\n# demand when referenced from our documentation files.\n#\n\n# Create a temp instance of JupyterHub for use by two separate directive classes\n# to get the output from using the \"--generate-config\" and \"--help-all\" CLI\n# flags respectively.\n#\njupyterhub_app = JupyterHub()\n\n\nclass ConfigDirective(SphinxDirective):\n \"\"\"Generate the configuration file output for use in the documentation.\"\"\"\n\n has_content = False\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = False\n option_spec = {}\n\n def run(self):\n # The generated configuration file for this version\n generated_config = jupyterhub_app.generate_config_file()\n # post-process output\n home_dir = os.environ[\"HOME\"]\n generated_config = generated_config.replace(home_dir, \"$HOME\", 1)\n par = nodes.literal_block(text=generated_config)\n return [par]\n\n\nclass HelpAllDirective(SphinxDirective):\n \"\"\"Print the output of jupyterhub help --all for use in the documentation.\"\"\"\n\n has_content = False\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = False\n option_spec = {}\n\n def run(self):\n # The output of the help command for this version\n buffer = 
io.StringIO()\n with contextlib.redirect_stdout(buffer):\n jupyterhub_app.print_help(\"--help-all\")\n all_help = buffer.getvalue()\n # post-process output\n home_dir = os.environ[\"HOME\"]\n all_help = all_help.replace(home_dir, \"$HOME\", 1)\n par = nodes.literal_block(text=all_help)\n return [par]\n\n\ndef setup(app):\n app.add_css_file(\"custom.css\")\n app.add_directive(\"jupyterhub-generate-config\", ConfigDirective)\n app.add_directive(\"jupyterhub-help-all\", HelpAllDirective)\n\n\n# -- Read The Docs -----------------------------------------------------------\n#\n# Since RTD runs sphinx-build directly without running \"make html\", we run the\n# pre-requisite steps for \"make html\" from here if needed.\n#\nif os.environ.get(\"READTHEDOCS\"):\n docs = os.path.dirname(os.path.dirname(__file__))\n subprocess.check_call([\"make\", \"metrics\", \"scopes\"], cwd=docs)\n\n\n# -- Spell checking ----------------------------------------------------------\n# ref: https://sphinxcontrib-spelling.readthedocs.io/en/latest/customize.html#configuration-options\n#\n# The \"sphinxcontrib.spelling\" extension is optionally enabled if its available.\n#\ntry:\n import sphinxcontrib.spelling # noqa\nexcept ImportError:\n pass\nelse:\n extensions.append(\"sphinxcontrib.spelling\")\nspelling_word_list_filename = \"spelling_wordlist.txt\"\n\n\n# -- Options for HTML output -------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n#\nhtml_logo = \"_static/images/logo/logo.png\"\nhtml_favicon = \"_static/images/logo/favicon.ico\"\nhtml_static_path = [\"_static\"]\n\nhtml_theme = \"jupyterhub_sphinx_theme\"\nhtml_theme_options = {\n \"icon_links\": [\n {\n \"name\": \"GitHub\",\n \"url\": \"https://github.com/jupyterhub/jupyterhub\",\n \"icon\": \"fa-brands fa-github\",\n },\n ],\n \"use_edit_page_button\": True,\n \"navbar_align\": \"left\",\n}\nhtml_context = {\n \"github_user\": \"jupyterhub\",\n \"github_repo\": \"jupyterhub\",\n \"github_version\": \"main\",\n \"doc_path\": \"docs/source\",\n}\n\n\n# -- Options for linkcheck builder -------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder\n#\nlinkcheck_ignore = [\n r\"(.*)github\\.com(.*)#\", # javascript based anchors\n r\"(.*)/#%21(.*)/(.*)\", # /#!forum/jupyter - encoded anchor edge case\n r\"https://github.com/[^/]*$\", # too many github usernames / searches in changelog\n \"https://github.com/jupyterhub/jupyterhub/pull/\", # too many PRs in changelog\n \"https://github.com/jupyterhub/jupyterhub/compare/\", # too many comparisons in changelog\n r\"https?://(localhost|127.0.0.1).*\", # ignore localhost references in auto-links\n r\".*/rest-api.html#.*\", # ignore javascript-resolved internal rest-api links\n r\"https://jupyter.chameleoncloud.org\", # FIXME: ignore (presumably) short-term SSL issue\n]\nlinkcheck_anchors_ignore = [\n \"/#!\",\n \"/#%21\",\n]\n\n# -- Intersphinx -------------------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html#configuration\n#\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"tornado\": (\"https://www.tornadoweb.org/en/stable/\", None),\n \"jupyter-server\": (\"https://jupyter-server.readthedocs.io/en/stable/\", None),\n}\n\n# -- Options for the opengraph extension -------------------------------------\n# ref: 
https://github.com/wpilibsuite/sphinxext-opengraph#options\n#\n# ogp_site_url is set automatically by RTD\nogp_image = \"_static/logo.png\"\nogp_use_first_image = True\n\n\n# -- Options for the rediraffe extension -------------------------------------\n# ref: https://github.com/wpilibsuite/sphinxext-rediraffe#readme\n#\n# This extension helps us relocate content without breaking links. If a\n# document is moved internally, a redirect link should be configured as below to\n# help us not break links.\n#\n# The workflow for adding redirects can be as follows:\n# 1. Change \"rediraffe_branch\" below to point to the commit/ branch you\n# want to base off the changes.\n# 2. Option 1: run \"make rediraffecheckdiff\"\n# a. Analyze the output of this command.\n# b. Manually add the redirect entries to the \"redirects.txt\" file.\n# Option 2: run \"make rediraffewritediff\"\n# a. rediraffe will then automatically add the obvious redirects to redirects.txt.\n# b. Analyze the output of the command for broken links.\n# c. Check the \"redirects.txt\" file for any files that were moved/ renamed but are not listed.\n# d. Manually add the redirects that have been mised by the automatic builder to \"redirects.txt\".\n# Option 3: Do not use the commands above and, instead, do everything manually - by taking\n# note of the files you have moved or renamed and adding them to the \"redirects.txt\" file.\n#\n# If you are basing changes off another branch/ commit, always change back\n# rediraffe_branch to main before pushing your changes upstream.\n#\nrediraffe_branch = \"main\"\nrediraffe_redirects = \"redirects.txt\"\n# rediraffe_redirects = {\n# \"old-file\": \"new-folder/new-file-name\",\n# }\n", "path": "docs/source/conf.py"}]}
3,511
159
gh_patches_debug_21011
rasdani/github-patches
git_diff
ivy-llc__ivy-18334
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> compress </issue> <code> [start of ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py] 1 import ivy 2 from ivy.functional.frontends.numpy.func_wrapper import ( 3 to_ivy_arrays_and_back, 4 inputs_to_ivy_arrays, 5 ) 6 7 8 @to_ivy_arrays_and_back 9 def take_along_axis(arr, indices, axis): 10 return ivy.take_along_axis(arr, indices, axis) 11 12 13 @to_ivy_arrays_and_back 14 def tril_indices(n, k=0, m=None): 15 return ivy.tril_indices(n, m, k) 16 17 18 @to_ivy_arrays_and_back 19 def indices(dimensions, dtype=int, sparse=False): 20 dimensions = tuple(dimensions) 21 N = len(dimensions) 22 shape = (1,) * N 23 if sparse: 24 res = tuple() 25 else: 26 res = ivy.empty((N,) + dimensions, dtype=dtype) 27 for i, dim in enumerate(dimensions): 28 idx = ivy.arange(dim, dtype=dtype).reshape(shape[:i] + (dim,) + shape[i + 1 :]) 29 if sparse: 30 res = res + (idx,) 31 else: 32 res[i] = idx 33 return res 34 35 36 # unravel_index 37 @to_ivy_arrays_and_back 38 def unravel_index(indices, shape, order="C"): 39 ret = [x.astype("int64") for x in ivy.unravel_index(indices, shape)] 40 return tuple(ret) 41 42 43 @to_ivy_arrays_and_back 44 def fill_diagonal(a, val, wrap=False): 45 if a.ndim < 2: 46 raise ValueError("array must be at least 2-d") 47 end = None 48 if a.ndim == 2: 49 # Explicit, fast formula for the common case. For 2-d arrays, we 50 # accept rectangular ones. 51 step = a.shape[1] + 1 52 # This is needed to don't have tall matrix have the diagonal wrap. 53 if not wrap: 54 end = a.shape[1] * a.shape[1] 55 else: 56 # For more than d=2, the strided formula is only valid for arrays with 57 # all dimensions equal, so we check first. 58 if not ivy.all(ivy.diff(a.shape) == 0): 59 raise ValueError("All dimensions of input must be of equal length") 60 step = 1 + ivy.sum(ivy.cumprod(a.shape[:-1])) 61 62 # Write the value out into the diagonal. 63 shape = a.shape 64 a = ivy.reshape(a, a.size) 65 a[:end:step] = val 66 a = ivy.reshape(a, shape) 67 68 69 @inputs_to_ivy_arrays 70 def put_along_axis(arr, indices, values, axis): 71 ivy.put_along_axis(arr, indices, values, axis) 72 73 74 def diag(v, k=0): 75 return ivy.diag(v, k=k) 76 77 78 @to_ivy_arrays_and_back 79 def diagonal(a, offset, axis1, axis2): 80 return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2) 81 [end of ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py b/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py --- a/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py +++ b/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py @@ -2,6 +2,7 @@ from ivy.functional.frontends.numpy.func_wrapper import ( to_ivy_arrays_and_back, inputs_to_ivy_arrays, + handle_numpy_out, ) @@ -78,3 +79,22 @@ @to_ivy_arrays_and_back def diagonal(a, offset, axis1, axis2): return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2) + + +@to_ivy_arrays_and_back +@handle_numpy_out +def compress(condition, a, axis=None, out=None): + condition_arr = ivy.asarray(condition).astype(bool) + if condition_arr.ndim != 1: + raise ivy.utils.exceptions.IvyException("Condition must be a 1D array") + if axis is None: + arr = ivy.asarray(a).flatten() + axis = 0 + else: + arr = ivy.moveaxis(a, axis, 0) + if condition_arr.shape[0] > arr.shape[0]: + raise ivy.utils.exceptions.IvyException( + "Condition contains entries that are out of bounds" + ) + arr = arr[: condition_arr.shape[0]] + return ivy.moveaxis(arr[condition_arr], 0, axis)
{"golden_diff": "diff --git a/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py b/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py\n--- a/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py\n+++ b/ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py\n@@ -2,6 +2,7 @@\n from ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n inputs_to_ivy_arrays,\n+ handle_numpy_out,\n )\n \n \n@@ -78,3 +79,22 @@\n @to_ivy_arrays_and_back\n def diagonal(a, offset, axis1, axis2):\n return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n+\n+\n+@to_ivy_arrays_and_back\n+@handle_numpy_out\n+def compress(condition, a, axis=None, out=None):\n+ condition_arr = ivy.asarray(condition).astype(bool)\n+ if condition_arr.ndim != 1:\n+ raise ivy.utils.exceptions.IvyException(\"Condition must be a 1D array\")\n+ if axis is None:\n+ arr = ivy.asarray(a).flatten()\n+ axis = 0\n+ else:\n+ arr = ivy.moveaxis(a, axis, 0)\n+ if condition_arr.shape[0] > arr.shape[0]:\n+ raise ivy.utils.exceptions.IvyException(\n+ \"Condition contains entries that are out of bounds\"\n+ )\n+ arr = arr[: condition_arr.shape[0]]\n+ return ivy.moveaxis(arr[condition_arr], 0, axis)\n", "issue": "compress\n\n", "before_files": [{"content": "import ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import (\n to_ivy_arrays_and_back,\n inputs_to_ivy_arrays,\n)\n\n\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis):\n return ivy.take_along_axis(arr, indices, axis)\n\n\n@to_ivy_arrays_and_back\ndef tril_indices(n, k=0, m=None):\n return ivy.tril_indices(n, m, k)\n\n\n@to_ivy_arrays_and_back\ndef indices(dimensions, dtype=int, sparse=False):\n dimensions = tuple(dimensions)\n N = len(dimensions)\n shape = (1,) * N\n if sparse:\n res = tuple()\n else:\n res = ivy.empty((N,) + dimensions, dtype=dtype)\n for i, dim in enumerate(dimensions):\n idx = ivy.arange(dim, dtype=dtype).reshape(shape[:i] + (dim,) + shape[i + 1 :])\n if sparse:\n res = res + (idx,)\n else:\n res[i] = idx\n return res\n\n\n# unravel_index\n@to_ivy_arrays_and_back\ndef unravel_index(indices, shape, order=\"C\"):\n ret = [x.astype(\"int64\") for x in ivy.unravel_index(indices, shape)]\n return tuple(ret)\n\n\n@to_ivy_arrays_and_back\ndef fill_diagonal(a, val, wrap=False):\n if a.ndim < 2:\n raise ValueError(\"array must be at least 2-d\")\n end = None\n if a.ndim == 2:\n # Explicit, fast formula for the common case. For 2-d arrays, we\n # accept rectangular ones.\n step = a.shape[1] + 1\n # This is needed to don't have tall matrix have the diagonal wrap.\n if not wrap:\n end = a.shape[1] * a.shape[1]\n else:\n # For more than d=2, the strided formula is only valid for arrays with\n # all dimensions equal, so we check first.\n if not ivy.all(ivy.diff(a.shape) == 0):\n raise ValueError(\"All dimensions of input must be of equal length\")\n step = 1 + ivy.sum(ivy.cumprod(a.shape[:-1]))\n\n # Write the value out into the diagonal.\n shape = a.shape\n a = ivy.reshape(a, a.size)\n a[:end:step] = val\n a = ivy.reshape(a, shape)\n\n\n@inputs_to_ivy_arrays\ndef put_along_axis(arr, indices, values, axis):\n ivy.put_along_axis(arr, indices, values, axis)\n\n\ndef diag(v, k=0):\n return ivy.diag(v, k=k)\n\n\n@to_ivy_arrays_and_back\ndef diagonal(a, offset, axis1, axis2):\n return ivy.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)\n", "path": "ivy/functional/frontends/numpy/indexing_routines/indexing_like_operations.py"}]}
1,368
368
gh_patches_debug_16116
rasdani/github-patches
git_diff
GeotrekCE__Geotrek-admin-1273
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add overlay tiles layers from settings https://github.com/makinacorpus/django-leaflet/issues/83 Would allow to show cadastral parcels over orthophoto, "plans de circulation" etc. </issue> <code> [start of geotrek/settings/dev.py] 1 from .default import * # NOQA 2 3 # 4 # Django Development 5 # .......................... 6 7 DEBUG = True 8 TEMPLATE_DEBUG = True 9 10 SOUTH_TESTS_MIGRATE = False # Tested at settings.tests 11 12 # 13 # Developper Toolbar 14 # .......................... 15 16 INSTALLED_APPS = ( 17 # 'debug_toolbar', 18 'django_extensions', 19 ) + INSTALLED_APPS 20 21 # 22 # Use Geotrek preprod tiles (uses default extent) 23 # ................................................ 24 25 LEAFLET_CONFIG['TILES'] = [ 26 (gettext_noop('Scan'), 'http://{s}.tile.osm.org/{z}/{x}/{y}.png', '(c) OpenStreetMap Contributors'), 27 (gettext_noop('Ortho'), 'http://{s}.tiles.mapbox.com/v3/openstreetmap.map-4wvf9l0l/{z}/{x}/{y}.jpg', '(c) MapBox'), 28 ] 29 LEAFLET_CONFIG['SRID'] = 3857 30 31 LOGGING['loggers']['geotrek']['level'] = 'DEBUG' 32 LOGGING['loggers']['']['level'] = 'DEBUG' 33 [end of geotrek/settings/dev.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/geotrek/settings/dev.py b/geotrek/settings/dev.py --- a/geotrek/settings/dev.py +++ b/geotrek/settings/dev.py @@ -19,13 +19,17 @@ ) + INSTALLED_APPS # -# Use Geotrek preprod tiles (uses default extent) -# ................................................ +# Use some default tiles +# .......................... LEAFLET_CONFIG['TILES'] = [ (gettext_noop('Scan'), 'http://{s}.tile.osm.org/{z}/{x}/{y}.png', '(c) OpenStreetMap Contributors'), (gettext_noop('Ortho'), 'http://{s}.tiles.mapbox.com/v3/openstreetmap.map-4wvf9l0l/{z}/{x}/{y}.jpg', '(c) MapBox'), ] +LEAFLET_CONFIG['OVERLAYS'] = [ + (gettext_noop('Coeur de parc'), 'http://{s}.tilestream.makina-corpus.net/v2/coeur-ecrins/{z}/{x}/{y}.png', 'Ecrins'), +] + LEAFLET_CONFIG['SRID'] = 3857 LOGGING['loggers']['geotrek']['level'] = 'DEBUG'
{"golden_diff": "diff --git a/geotrek/settings/dev.py b/geotrek/settings/dev.py\n--- a/geotrek/settings/dev.py\n+++ b/geotrek/settings/dev.py\n@@ -19,13 +19,17 @@\n ) + INSTALLED_APPS\n \n #\n-# Use Geotrek preprod tiles (uses default extent)\n-# ................................................\n+# Use some default tiles\n+# ..........................\n \n LEAFLET_CONFIG['TILES'] = [\n (gettext_noop('Scan'), 'http://{s}.tile.osm.org/{z}/{x}/{y}.png', '(c) OpenStreetMap Contributors'),\n (gettext_noop('Ortho'), 'http://{s}.tiles.mapbox.com/v3/openstreetmap.map-4wvf9l0l/{z}/{x}/{y}.jpg', '(c) MapBox'),\n ]\n+LEAFLET_CONFIG['OVERLAYS'] = [\n+ (gettext_noop('Coeur de parc'), 'http://{s}.tilestream.makina-corpus.net/v2/coeur-ecrins/{z}/{x}/{y}.png', 'Ecrins'),\n+]\n+\n LEAFLET_CONFIG['SRID'] = 3857\n \n LOGGING['loggers']['geotrek']['level'] = 'DEBUG'\n", "issue": "Add overlay tiles layers from settings\nhttps://github.com/makinacorpus/django-leaflet/issues/83 \n\nWould allow to show cadastral parcels over orthophoto, \"plans de circulation\" etc.\n\n", "before_files": [{"content": "from .default import * # NOQA\n\n#\n# Django Development\n# ..........................\n\nDEBUG = True\nTEMPLATE_DEBUG = True\n\nSOUTH_TESTS_MIGRATE = False # Tested at settings.tests\n\n#\n# Developper Toolbar\n# ..........................\n\nINSTALLED_APPS = (\n # 'debug_toolbar',\n 'django_extensions',\n) + INSTALLED_APPS\n\n#\n# Use Geotrek preprod tiles (uses default extent)\n# ................................................\n\nLEAFLET_CONFIG['TILES'] = [\n (gettext_noop('Scan'), 'http://{s}.tile.osm.org/{z}/{x}/{y}.png', '(c) OpenStreetMap Contributors'),\n (gettext_noop('Ortho'), 'http://{s}.tiles.mapbox.com/v3/openstreetmap.map-4wvf9l0l/{z}/{x}/{y}.jpg', '(c) MapBox'),\n]\nLEAFLET_CONFIG['SRID'] = 3857\n\nLOGGING['loggers']['geotrek']['level'] = 'DEBUG'\nLOGGING['loggers']['']['level'] = 'DEBUG'\n", "path": "geotrek/settings/dev.py"}]}
873
272
gh_patches_debug_18592
rasdani/github-patches
git_diff
aio-libs__aiohttp-5121
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> option to disable automatic client response body decompression enhancement for https://github.com/aio-libs/aiohttp/issues/1992 </issue> <code> [start of aiohttp/resolver.py] 1 import asyncio 2 import socket 3 from typing import Any, Dict, List, Optional 4 5 from .abc import AbstractResolver 6 from .helpers import get_running_loop 7 8 __all__ = ("ThreadedResolver", "AsyncResolver", "DefaultResolver") 9 10 try: 11 import aiodns 12 13 # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname') 14 except ImportError: # pragma: no cover 15 aiodns = None 16 17 aiodns_default = False 18 19 20 class ThreadedResolver(AbstractResolver): 21 """Use Executor for synchronous getaddrinfo() calls, which defaults to 22 concurrent.futures.ThreadPoolExecutor. 23 """ 24 25 def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None: 26 self._loop = get_running_loop(loop) 27 28 async def resolve( 29 self, host: str, port: int = 0, family: int = socket.AF_INET 30 ) -> List[Dict[str, Any]]: 31 infos = await self._loop.getaddrinfo( 32 host, port, type=socket.SOCK_STREAM, family=family 33 ) 34 35 hosts = [] 36 for family, _, proto, _, address in infos: 37 if family == socket.AF_INET6 and address[3]: # type: ignore 38 # This is essential for link-local IPv6 addresses. 39 # LL IPv6 is a VERY rare case. Strictly speaking, we should use 40 # getnameinfo() unconditionally, but performance makes sense. 41 host, _port = socket.getnameinfo( 42 address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV 43 ) 44 port = int(_port) 45 else: 46 host, port = address[:2] 47 hosts.append( 48 { 49 "hostname": host, 50 "host": host, 51 "port": port, 52 "family": family, 53 "proto": proto, 54 "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV, 55 } 56 ) 57 58 return hosts 59 60 async def close(self) -> None: 61 pass 62 63 64 class AsyncResolver(AbstractResolver): 65 """Use the `aiodns` package to make asynchronous DNS lookups""" 66 67 def __init__( 68 self, 69 loop: Optional[asyncio.AbstractEventLoop] = None, 70 *args: Any, 71 **kwargs: Any 72 ) -> None: 73 if aiodns is None: 74 raise RuntimeError("Resolver requires aiodns library") 75 76 self._loop = get_running_loop(loop) 77 self._resolver = aiodns.DNSResolver(*args, loop=loop, **kwargs) 78 79 if not hasattr(self._resolver, "gethostbyname"): 80 # aiodns 1.1 is not available, fallback to DNSResolver.query 81 self.resolve = self._resolve_with_query # type: ignore 82 83 async def resolve( 84 self, host: str, port: int = 0, family: int = socket.AF_INET 85 ) -> List[Dict[str, Any]]: 86 try: 87 resp = await self._resolver.gethostbyname(host, family) 88 except aiodns.error.DNSError as exc: 89 msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed" 90 raise OSError(msg) from exc 91 hosts = [] 92 for address in resp.addresses: 93 hosts.append( 94 { 95 "hostname": host, 96 "host": address, 97 "port": port, 98 "family": family, 99 "proto": 0, 100 "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV, 101 } 102 ) 103 104 if not hosts: 105 raise OSError("DNS lookup failed") 106 107 return hosts 108 109 async def _resolve_with_query( 110 self, host: str, port: int = 0, family: int = socket.AF_INET 111 ) -> List[Dict[str, Any]]: 112 if family == socket.AF_INET6: 113 qtype = "AAAA" 114 else: 115 qtype = "A" 116 117 try: 118 resp = await self._resolver.query(host, qtype) 119 except aiodns.error.DNSError as exc: 120 msg = exc.args[1] if len(exc.args) >= 1 else 
"DNS lookup failed" 121 raise OSError(msg) from exc 122 123 hosts = [] 124 for rr in resp: 125 hosts.append( 126 { 127 "hostname": host, 128 "host": rr.host, 129 "port": port, 130 "family": family, 131 "proto": 0, 132 "flags": socket.AI_NUMERICHOST, 133 } 134 ) 135 136 if not hosts: 137 raise OSError("DNS lookup failed") 138 139 return hosts 140 141 async def close(self) -> None: 142 return self._resolver.cancel() 143 144 145 DefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver 146 [end of aiohttp/resolver.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py --- a/aiohttp/resolver.py +++ b/aiohttp/resolver.py @@ -26,10 +26,10 @@ self._loop = get_running_loop(loop) async def resolve( - self, host: str, port: int = 0, family: int = socket.AF_INET + self, hostname: str, port: int = 0, family: int = socket.AF_INET ) -> List[Dict[str, Any]]: infos = await self._loop.getaddrinfo( - host, port, type=socket.SOCK_STREAM, family=family + hostname, port, type=socket.SOCK_STREAM, family=family ) hosts = [] @@ -46,7 +46,7 @@ host, port = address[:2] hosts.append( { - "hostname": host, + "hostname": hostname, "host": host, "port": port, "family": family,
{"golden_diff": "diff --git a/aiohttp/resolver.py b/aiohttp/resolver.py\n--- a/aiohttp/resolver.py\n+++ b/aiohttp/resolver.py\n@@ -26,10 +26,10 @@\n self._loop = get_running_loop(loop)\n \n async def resolve(\n- self, host: str, port: int = 0, family: int = socket.AF_INET\n+ self, hostname: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n- host, port, type=socket.SOCK_STREAM, family=family\n+ hostname, port, type=socket.SOCK_STREAM, family=family\n )\n \n hosts = []\n@@ -46,7 +46,7 @@\n host, port = address[:2]\n hosts.append(\n {\n- \"hostname\": host,\n+ \"hostname\": hostname,\n \"host\": host,\n \"port\": port,\n \"family\": family,\n", "issue": "option to disable automatic client response body decompression\nenhancement for https://github.com/aio-libs/aiohttp/issues/1992\n", "before_files": [{"content": "import asyncio\nimport socket\nfrom typing import Any, Dict, List, Optional\n\nfrom .abc import AbstractResolver\nfrom .helpers import get_running_loop\n\n__all__ = (\"ThreadedResolver\", \"AsyncResolver\", \"DefaultResolver\")\n\ntry:\n import aiodns\n\n # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')\nexcept ImportError: # pragma: no cover\n aiodns = None\n\naiodns_default = False\n\n\nclass ThreadedResolver(AbstractResolver):\n \"\"\"Use Executor for synchronous getaddrinfo() calls, which defaults to\n concurrent.futures.ThreadPoolExecutor.\n \"\"\"\n\n def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:\n self._loop = get_running_loop(loop)\n\n async def resolve(\n self, host: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n infos = await self._loop.getaddrinfo(\n host, port, type=socket.SOCK_STREAM, family=family\n )\n\n hosts = []\n for family, _, proto, _, address in infos:\n if family == socket.AF_INET6 and address[3]: # type: ignore\n # This is essential for link-local IPv6 addresses.\n # LL IPv6 is a VERY rare case. 
Strictly speaking, we should use\n # getnameinfo() unconditionally, but performance makes sense.\n host, _port = socket.getnameinfo(\n address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV\n )\n port = int(_port)\n else:\n host, port = address[:2]\n hosts.append(\n {\n \"hostname\": host,\n \"host\": host,\n \"port\": port,\n \"family\": family,\n \"proto\": proto,\n \"flags\": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n }\n )\n\n return hosts\n\n async def close(self) -> None:\n pass\n\n\nclass AsyncResolver(AbstractResolver):\n \"\"\"Use the `aiodns` package to make asynchronous DNS lookups\"\"\"\n\n def __init__(\n self,\n loop: Optional[asyncio.AbstractEventLoop] = None,\n *args: Any,\n **kwargs: Any\n ) -> None:\n if aiodns is None:\n raise RuntimeError(\"Resolver requires aiodns library\")\n\n self._loop = get_running_loop(loop)\n self._resolver = aiodns.DNSResolver(*args, loop=loop, **kwargs)\n\n if not hasattr(self._resolver, \"gethostbyname\"):\n # aiodns 1.1 is not available, fallback to DNSResolver.query\n self.resolve = self._resolve_with_query # type: ignore\n\n async def resolve(\n self, host: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n try:\n resp = await self._resolver.gethostbyname(host, family)\n except aiodns.error.DNSError as exc:\n msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n raise OSError(msg) from exc\n hosts = []\n for address in resp.addresses:\n hosts.append(\n {\n \"hostname\": host,\n \"host\": address,\n \"port\": port,\n \"family\": family,\n \"proto\": 0,\n \"flags\": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,\n }\n )\n\n if not hosts:\n raise OSError(\"DNS lookup failed\")\n\n return hosts\n\n async def _resolve_with_query(\n self, host: str, port: int = 0, family: int = socket.AF_INET\n ) -> List[Dict[str, Any]]:\n if family == socket.AF_INET6:\n qtype = \"AAAA\"\n else:\n qtype = \"A\"\n\n try:\n resp = await self._resolver.query(host, qtype)\n except aiodns.error.DNSError as exc:\n msg = exc.args[1] if len(exc.args) >= 1 else \"DNS lookup failed\"\n raise OSError(msg) from exc\n\n hosts = []\n for rr in resp:\n hosts.append(\n {\n \"hostname\": host,\n \"host\": rr.host,\n \"port\": port,\n \"family\": family,\n \"proto\": 0,\n \"flags\": socket.AI_NUMERICHOST,\n }\n )\n\n if not hosts:\n raise OSError(\"DNS lookup failed\")\n\n return hosts\n\n async def close(self) -> None:\n return self._resolver.cancel()\n\n\nDefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver\n", "path": "aiohttp/resolver.py"}]}
1,929
231
gh_patches_debug_7305
rasdani/github-patches
git_diff
streamlink__streamlink-2230
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update dependencies with security vulnerabilities As mentioned here: https://github.com/streamlink/streamlink/pull/2199#issuecomment-447567326 1. urllib3 > Vulnerable versions: < 1.23 > Patched version: 1.23 > urllib3 before version 1.23 does not remove the Authorization HTTP header when following a cross-origin redirect (i.e., a redirect that differs in host, port, or scheme). This can allow for credentials in the Authorization header to be exposed to unintended hosts or transmitted in cleartext. https://nvd.nist.gov/vuln/detail/CVE-2018-20060 2. requests > Vulnerable versions: <= 2.19.1 > Patched version: 2.20.0 > The Requests package through 2.19.1 before 2018-09-14 for Python sends an HTTP Authorization header to an http URI upon receiving a same-hostname https-to-http redirect, which makes it easier for remote attackers to discover credentials by sniffing the network. https://nvd.nist.gov/vuln/detail/CVE-2018-18074 </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import codecs 3 from os import environ 4 from os import path 5 from sys import path as sys_path 6 7 from setuptools import setup, find_packages 8 9 import versioneer 10 11 deps = [ 12 # Require backport of concurrent.futures on Python 2 13 'futures;python_version<"3.0"', 14 # Require singledispatch on Python <3.4 15 'singledispatch;python_version<"3.4"', 16 "requests>=2.2,!=2.12.0,!=2.12.1,!=2.16.0,!=2.16.1,!=2.16.2,!=2.16.3,!=2.16.4,!=2.16.5,!=2.17.1,<3.0", 17 'urllib3[secure]<1.23,>=1.21.1;python_version<"3.0"', 18 "isodate", 19 "websocket-client", 20 # Support for SOCKS proxies 21 "PySocks!=1.5.7,>=1.5.6", 22 # win-inet-pton is missing a dependency in PySocks, this has been fixed but not released yet 23 # Required due to missing socket.inet_ntop & socket.inet_pton method in Windows Python 2.x 24 'win-inet-pton;python_version<"3.0" and platform_system=="Windows"', 25 # shutil.get_terminal_size and which were added in Python 3.3 26 'backports.shutil_which;python_version<"3.3"', 27 'backports.shutil_get_terminal_size;python_version<"3.3"' 28 ] 29 30 # for encrypted streams 31 if environ.get("STREAMLINK_USE_PYCRYPTO"): 32 deps.append("pycrypto") 33 else: 34 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6 35 deps.append("pycryptodome>=3.4.3,<4") 36 37 # for localization 38 if environ.get("STREAMLINK_USE_PYCOUNTRY"): 39 deps.append("pycountry") 40 else: 41 deps.append("iso-639") 42 deps.append("iso3166") 43 44 # When we build an egg for the Win32 bootstrap we don"t want dependency 45 # information built into it. 
46 if environ.get("NO_DEPS"): 47 deps = [] 48 49 this_directory = path.abspath(path.dirname(__file__)) 50 srcdir = path.join(this_directory, "src/") 51 sys_path.insert(0, srcdir) 52 53 with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f: 54 long_description = f.read() 55 56 setup(name="streamlink", 57 version=versioneer.get_version(), 58 cmdclass=versioneer.get_cmdclass(), 59 description="Streamlink is command-line utility that extracts streams " 60 "from various services and pipes them into a video player of " 61 "choice.", 62 long_description=long_description, 63 long_description_content_type="text/markdown", 64 url="https://github.com/streamlink/streamlink", 65 project_urls={ 66 "Documentation": "https://streamlink.github.io/", 67 "Tracker": "https://github.com/streamlink/streamlink/issues", 68 "Source": "https://github.com/streamlink/streamlink", 69 "Funding": "https://opencollective.com/streamlink" 70 }, 71 author="Streamlink", 72 # temp until we have a mailing list / global email 73 author_email="[email protected]", 74 license="Simplified BSD", 75 packages=find_packages("src"), 76 package_dir={"": "src"}, 77 entry_points={ 78 "console_scripts": ["streamlink=streamlink_cli.main:main"] 79 }, 80 install_requires=deps, 81 test_suite="tests", 82 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4", 83 classifiers=["Development Status :: 5 - Production/Stable", 84 "License :: OSI Approved :: BSD License", 85 "Environment :: Console", 86 "Intended Audience :: End Users/Desktop", 87 "Operating System :: POSIX", 88 "Operating System :: Microsoft :: Windows", 89 "Operating System :: MacOS", 90 "Programming Language :: Python :: 2.7", 91 "Programming Language :: Python :: 3.4", 92 "Programming Language :: Python :: 3.5", 93 "Programming Language :: Python :: 3.6", 94 "Programming Language :: Python :: 3.7", 95 "Topic :: Internet :: WWW/HTTP", 96 "Topic :: Multimedia :: Sound/Audio", 97 "Topic :: Multimedia :: Video", 98 "Topic :: Utilities"]) 99 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -13,8 +13,8 @@ 'futures;python_version<"3.0"', # Require singledispatch on Python <3.4 'singledispatch;python_version<"3.4"', - "requests>=2.2,!=2.12.0,!=2.12.1,!=2.16.0,!=2.16.1,!=2.16.2,!=2.16.3,!=2.16.4,!=2.16.5,!=2.17.1,<3.0", - 'urllib3[secure]<1.23,>=1.21.1;python_version<"3.0"', + "requests>=2.21.0,<3.0", + 'urllib3[secure]>=1.23;python_version<"3.0"', "isodate", "websocket-client", # Support for SOCKS proxies
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -13,8 +13,8 @@\n 'futures;python_version<\"3.0\"',\n # Require singledispatch on Python <3.4\n 'singledispatch;python_version<\"3.4\"',\n- \"requests>=2.2,!=2.12.0,!=2.12.1,!=2.16.0,!=2.16.1,!=2.16.2,!=2.16.3,!=2.16.4,!=2.16.5,!=2.17.1,<3.0\",\n- 'urllib3[secure]<1.23,>=1.21.1;python_version<\"3.0\"',\n+ \"requests>=2.21.0,<3.0\",\n+ 'urllib3[secure]>=1.23;python_version<\"3.0\"',\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n", "issue": "Update dependencies with security vulnerabilities\nAs mentioned here:\r\nhttps://github.com/streamlink/streamlink/pull/2199#issuecomment-447567326\r\n\r\n1. urllib3\r\n > Vulnerable versions: < 1.23\r\n > Patched version: 1.23\r\n > urllib3 before version 1.23 does not remove the Authorization HTTP header when following a cross-origin redirect (i.e., a redirect that differs in host, port, or scheme). This can allow for credentials in the Authorization header to be exposed to unintended hosts or transmitted in cleartext.\r\n https://nvd.nist.gov/vuln/detail/CVE-2018-20060\r\n2. requests\r\n > Vulnerable versions: <= 2.19.1\r\n > Patched version: 2.20.0\r\n > The Requests package through 2.19.1 before 2018-09-14 for Python sends an HTTP Authorization header to an http URI upon receiving a same-hostname https-to-http redirect, which makes it easier for remote attackers to discover credentials by sniffing the network.\r\n https://nvd.nist.gov/vuln/detail/CVE-2018-18074\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ\nfrom os import path\nfrom sys import path as sys_path\n\nfrom setuptools import setup, find_packages\n\nimport versioneer\n\ndeps = [\n # Require backport of concurrent.futures on Python 2\n 'futures;python_version<\"3.0\"',\n # Require singledispatch on Python <3.4\n 'singledispatch;python_version<\"3.4\"',\n \"requests>=2.2,!=2.12.0,!=2.12.1,!=2.16.0,!=2.16.1,!=2.16.2,!=2.16.3,!=2.16.4,!=2.16.5,!=2.17.1,<3.0\",\n 'urllib3[secure]<1.23,>=1.21.1;python_version<\"3.0\"',\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n # win-inet-pton is missing a dependency in PySocks, this has been fixed but not released yet\n # Required due to missing socket.inet_ntop & socket.inet_pton method in Windows Python 2.x\n 'win-inet-pton;python_version<\"3.0\" and platform_system==\"Windows\"',\n # shutil.get_terminal_size and which were added in Python 3.3\n 'backports.shutil_which;python_version<\"3.3\"',\n 'backports.shutil_get_terminal_size;python_version<\"3.3\"'\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n 
description=\"Streamlink is command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points={\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n },\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]}
num_tokens_prompt: 2,013
num_tokens_diff: 240
problem_id: gh_patches_debug_18267
source: rasdani/github-patches
task_type: git_diff
in_source_id: aws-cloudformation__cfn-lint-689
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> E2504 enforces incorrect boundary 100-2000, should be 100-20000 *cfn-lint version: (`cfn-lint --version`)* 0.15.0 (also tested with 0.14.1) **Description:** AWS EC2 Instance with block device mapping does not enforce the correct `Ebs/Iops` boundary of 100-20000 as specified in the [documentation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html) It looks like it actually enforces 100-2000, as if you set `Iops` to 2000 it passes the linter. **Sample to reproduce:** ```yaml Resources: Machine: Type: AWS::EC2::Instance Properties: ImageId: "ami-79fd7eee" KeyName: "testkey" BlockDeviceMappings: - DeviceName: /dev/sdm Ebs: VolumeType: io1 Iops: 3000 DeleteOnTermination: false VolumeSize: 20 ``` Output: ```bash > cfn-lint minimal-stack.yaml E2504 Property Iops should be Int between 100 to 20000 Resources/Machine/Properties/BlockDeviceMappings/0/Ebs/Iops minimal-stack.yaml:11:13 ``` </issue> <code> [start of src/cfnlint/rules/resources/ectwo/Ebs.py] 1 """ 2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
16 """ 17 import re 18 import six 19 from cfnlint import CloudFormationLintRule 20 from cfnlint import RuleMatch 21 22 23 class Ebs(CloudFormationLintRule): 24 """Check if Ec2 Ebs Resource Properties""" 25 id = 'E2504' 26 shortdesc = 'Check Ec2 Ebs Properties' 27 description = 'See if Ec2 Eb2 Properties are valid' 28 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html' 29 tags = ['properties', 'ec2', 'ebs'] 30 31 def _checkEbs(self, cfn, ebs, path): 32 matches = [] 33 34 if isinstance(ebs, dict): 35 volume_types_obj = cfn.get_values(ebs, 'VolumeType') 36 iops_obj = cfn.get_values(ebs, 'Iops') 37 if volume_types_obj is not None: 38 for volume_type_obj in volume_types_obj: 39 volume_type = volume_type_obj.get('Value') 40 if isinstance(volume_type, six.string_types): 41 if volume_type == 'io1': 42 if iops_obj is None: 43 pathmessage = path[:] + ['VolumeType'] 44 message = 'VolumeType io1 requires Iops to be specified for {0}' 45 matches.append( 46 RuleMatch(pathmessage, message.format('/'.join(map(str, pathmessage))))) 47 else: 48 try: 49 if len(iops_obj) == 1: 50 iops = iops_obj[0]['Value'] 51 if isinstance(iops, (six.string_types, int)) and not iops_obj[0]['Path']: 52 iops_value = int(iops) 53 if iops_value < 100 or iops_value > 2000: 54 pathmessage = path[:] + ['Iops'] 55 message = 'Property Iops should be Int between 100 to 20000 {0}' 56 matches.append( 57 RuleMatch( 58 pathmessage, 59 message.format('/'.join(map(str, pathmessage))))) 60 except ValueError: 61 pathmessage = path[:] + ['Iops'] 62 message = 'Property Iops should be Int between 100 to 20000 {0}' 63 matches.append( 64 RuleMatch(pathmessage, message.format('/'.join(map(str, pathmessage))))) 65 elif volume_type: 66 if iops_obj is not None: 67 pathmessage = path[:] + ['Iops'] 68 message = 'Iops shouldn\'t be defined for type {0} for {1}' 69 matches.append( 70 RuleMatch( 71 pathmessage, 72 message.format(volume_type, '/'.join(map(str, pathmessage))))) 73 74 return matches 75 76 def match(self, cfn): 77 """Check Ec2 Ebs Resource Parameters""" 78 79 matches = [] 80 81 results = cfn.get_resource_properties(['AWS::EC2::Instance', 'BlockDeviceMappings']) 82 results.extend(cfn.get_resource_properties(['AWS::AutoScaling::LaunchConfiguration', 'BlockDeviceMappings'])) 83 for result in results: 84 path = result['Path'] 85 for index, properties in enumerate(result['Value']): 86 virtual_name = properties.get('VirtualName') 87 ebs = properties.get('Ebs') 88 if virtual_name: 89 # switch to regex 90 if not re.match(r'^ephemeral[0-9]$', virtual_name): 91 pathmessage = path[:] + [index, 'VirtualName'] 92 message = 'Property VirtualName should be of type ephemeral(n) for {0}' 93 matches.append( 94 RuleMatch(pathmessage, message.format('/'.join(map(str, pathmessage))))) 95 elif ebs: 96 matches.extend(self._checkEbs(cfn, ebs, path[:] + [index, 'Ebs'])) 97 return matches 98 [end of src/cfnlint/rules/resources/ectwo/Ebs.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/resources/ectwo/Ebs.py b/src/cfnlint/rules/resources/ectwo/Ebs.py --- a/src/cfnlint/rules/resources/ectwo/Ebs.py +++ b/src/cfnlint/rules/resources/ectwo/Ebs.py @@ -50,7 +50,7 @@ iops = iops_obj[0]['Value'] if isinstance(iops, (six.string_types, int)) and not iops_obj[0]['Path']: iops_value = int(iops) - if iops_value < 100 or iops_value > 2000: + if iops_value < 100 or iops_value > 20000: pathmessage = path[:] + ['Iops'] message = 'Property Iops should be Int between 100 to 20000 {0}' matches.append(
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/ectwo/Ebs.py b/src/cfnlint/rules/resources/ectwo/Ebs.py\n--- a/src/cfnlint/rules/resources/ectwo/Ebs.py\n+++ b/src/cfnlint/rules/resources/ectwo/Ebs.py\n@@ -50,7 +50,7 @@\n iops = iops_obj[0]['Value']\n if isinstance(iops, (six.string_types, int)) and not iops_obj[0]['Path']:\n iops_value = int(iops)\n- if iops_value < 100 or iops_value > 2000:\n+ if iops_value < 100 or iops_value > 20000:\n pathmessage = path[:] + ['Iops']\n message = 'Property Iops should be Int between 100 to 20000 {0}'\n matches.append(\n", "issue": "E2504 enforces incorrect boundary 100-2000, should be 100-20000\n*cfn-lint version: (`cfn-lint --version`)* 0.15.0 (also tested with 0.14.1)\r\n\r\n**Description:**\r\nAWS EC2 Instance with block device mapping does not enforce the correct `Ebs/Iops` boundary of 100-20000 as specified in the [documentation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html)\r\n\r\nIt looks like it actually enforces 100-2000, as if you set `Iops` to 2000 it passes the linter.\r\n\r\n**Sample to reproduce:**\r\n```yaml\r\nResources:\r\n Machine:\r\n Type: AWS::EC2::Instance\r\n Properties:\r\n ImageId: \"ami-79fd7eee\"\r\n KeyName: \"testkey\"\r\n BlockDeviceMappings: \r\n - DeviceName: /dev/sdm\r\n Ebs: \r\n VolumeType: io1\r\n Iops: 3000 \r\n DeleteOnTermination: false\r\n VolumeSize: 20\r\n```\r\n\r\nOutput:\r\n```bash\r\n> cfn-lint minimal-stack.yaml\r\n\r\nE2504 Property Iops should be Int between 100 to 20000 Resources/Machine/Properties/BlockDeviceMappings/0/Ebs/Iops\r\nminimal-stack.yaml:11:13\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport re\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Ebs(CloudFormationLintRule):\n \"\"\"Check if Ec2 Ebs Resource Properties\"\"\"\n id = 'E2504'\n shortdesc = 'Check Ec2 Ebs Properties'\n description = 'See if Ec2 Eb2 Properties are valid'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-blockdev-template.html'\n tags = ['properties', 'ec2', 'ebs']\n\n def _checkEbs(self, cfn, ebs, path):\n matches = []\n\n if isinstance(ebs, dict):\n volume_types_obj = cfn.get_values(ebs, 'VolumeType')\n iops_obj = cfn.get_values(ebs, 'Iops')\n if volume_types_obj is not None:\n for volume_type_obj in volume_types_obj:\n volume_type = volume_type_obj.get('Value')\n if isinstance(volume_type, six.string_types):\n if volume_type == 'io1':\n if iops_obj is None:\n pathmessage = path[:] + ['VolumeType']\n message = 'VolumeType io1 requires Iops to be specified for {0}'\n matches.append(\n RuleMatch(pathmessage, message.format('/'.join(map(str, pathmessage)))))\n else:\n try:\n if len(iops_obj) == 1:\n iops = iops_obj[0]['Value']\n if isinstance(iops, (six.string_types, int)) and not iops_obj[0]['Path']:\n iops_value = int(iops)\n if iops_value < 100 or iops_value > 2000:\n pathmessage = path[:] + ['Iops']\n message = 'Property Iops should be Int between 100 to 20000 {0}'\n matches.append(\n RuleMatch(\n pathmessage,\n message.format('/'.join(map(str, pathmessage)))))\n except ValueError:\n pathmessage = path[:] + ['Iops']\n message = 'Property Iops should be Int between 100 to 20000 {0}'\n matches.append(\n RuleMatch(pathmessage, message.format('/'.join(map(str, pathmessage)))))\n elif volume_type:\n if iops_obj is not None:\n pathmessage = path[:] + ['Iops']\n message = 'Iops shouldn\\'t be defined for type {0} for {1}'\n matches.append(\n RuleMatch(\n pathmessage,\n message.format(volume_type, '/'.join(map(str, pathmessage)))))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check Ec2 Ebs Resource Parameters\"\"\"\n\n matches = []\n\n results = cfn.get_resource_properties(['AWS::EC2::Instance', 'BlockDeviceMappings'])\n results.extend(cfn.get_resource_properties(['AWS::AutoScaling::LaunchConfiguration', 'BlockDeviceMappings']))\n for result in results:\n path = result['Path']\n for index, properties in enumerate(result['Value']):\n virtual_name = properties.get('VirtualName')\n ebs = properties.get('Ebs')\n if virtual_name:\n # switch to regex\n if not re.match(r'^ephemeral[0-9]$', virtual_name):\n pathmessage = path[:] + [index, 'VirtualName']\n message = 'Property VirtualName should be of type ephemeral(n) for {0}'\n matches.append(\n RuleMatch(pathmessage, message.format('/'.join(map(str, pathmessage)))))\n elif ebs:\n matches.extend(self._checkEbs(cfn, ebs, path[:] + [index, 'Ebs']))\n return matches\n", "path": "src/cfnlint/rules/resources/ectwo/Ebs.py"}]}
num_tokens_prompt: 2,083
num_tokens_diff: 201
problem_id: gh_patches_debug_14741
source: rasdani/github-patches
task_type: git_diff
in_source_id: OpenMined__PySyft-3684
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update Polynomial operations for non same size polynomials. ## Description Fix poly_mul_mod operations for two non-same size polynomial arguments. polynomial operations are done with wrong orientation. ## How to Reproduce 1. Apply `poly_mul_mod` with two non-same size polynomials. 2. The result is incorrect. </issue> <code> [start of syft/frameworks/torch/he/fv/util/operations.py] 1 import numpy as np 2 from numpy.polynomial import polynomial as poly 3 4 from syft.frameworks.torch.he.fv.ciphertext import CipherText 5 6 7 def multiply_mod(operand1, operand2, modulus): 8 return (operand1 * operand2) % modulus 9 10 11 def negate_mod(operand, modulus): 12 """returns (-1 * operand) % modulus""" 13 if modulus == 0: 14 raise ValueError("Modulus cannot be 0") 15 if operand >= modulus: 16 raise OverflowError("operand cannot be greater than modulus") 17 non_zero = operand != 0 18 return (modulus - operand) & (-int(non_zero)) 19 20 21 def exponentiate_mod(operand, exponent, modulus): 22 if exponent == 0: 23 return 1 24 25 if exponent == 1: 26 return operand 27 28 # Perform binary exponentiation. 29 power = operand 30 product = 0 31 intermediate = 1 32 33 # Initially: power = operand and intermediate = 1, product is irrelevant. 34 while True: 35 if exponent & 1: 36 product = multiply_mod(power, intermediate, modulus) 37 product, intermediate = intermediate, product 38 39 exponent >>= 1 40 41 if exponent == 0: 42 break 43 44 product = multiply_mod(power, power, modulus) 45 product, power = power, product 46 47 return intermediate 48 49 50 def invert_mod(value, modulus): 51 """calculate inverse modulus for given value and modulus""" 52 gcd_tuple = xgcd(value, modulus) 53 54 if gcd_tuple[1] < 0: 55 return gcd_tuple[1] + modulus 56 else: 57 return gcd_tuple[1] 58 59 60 def poly_add_mod(op1, op2, modulus): 61 """return addition of two polynomials with all coefficients of 62 polynomial %q(coefficient modulus)""" 63 return np.mod(np.polyadd(op1, op2), modulus).tolist() 64 65 66 def poly_mul_mod(op1, op2, modulus): 67 """return multiplication of two polynomials with all coefficients of 68 polynomial %q(coefficient modulus) and result polynomial % t(polynomial modulus)""" 69 poly_mod = np.array([1] + [0] * (len(op1) - 1) + [1]) 70 result = ( 71 poly.polydiv( 72 poly.polymul(np.array(op1, dtype="object"), np.array(op2, dtype="object")) % modulus, 73 poly_mod, 74 )[1] 75 % modulus 76 ).tolist() 77 return [round(x) for x in result] 78 79 80 def poly_negate_mod(op, modulus): 81 """returns negative of polynomial i.e (-1 * op)""" 82 coeff_count = len(op) 83 84 result = [0] * coeff_count 85 for i in range(coeff_count): 86 if modulus == 0: 87 raise ValueError("Modulus cannot be 0") 88 if op[i] >= modulus: 89 raise OverflowError("operand cannot be greater than modulus") 90 non_zero = op[i] != 0 91 result[i] = (modulus - op[i]) & (-int(non_zero)) 92 return result 93 94 95 def get_significant_count(values): 96 """removes leading zero's from the list.""" 97 count = len(values) 98 i = count - 1 99 while count and not values[i]: 100 i -= 1 101 count -= 1 102 return count 103 104 105 def reverse_bit(value): 106 """calculate the value of the reverse binary representation of the given integer.""" 107 result = 0 108 while value: 109 result = (result << 1) + (value & 1) 110 value >>= 1 111 return result 112 113 114 def multiply_many_except(operands, count, expt): 115 result = 1 116 for i in range(count): 117 if i != 
expt: 118 result *= operands[i] 119 return result 120 121 122 def xgcd(x, y): 123 """ Extended GCD 124 125 Args: 126 x (integer) 127 y (integer) 128 129 Returns: 130 (gcd, x, y) where gcd is the greatest common divisor of a and b. 131 The numbers x, y are such that gcd = ax + by. 132 """ 133 prev_a = 1 134 a = 0 135 prev_b = 0 136 b = 1 137 138 while y != 0: 139 q = x // y 140 temp = x % y 141 x = y 142 y = temp 143 144 temp = a 145 a = prev_a - q * a 146 prev_a = temp 147 148 temp = b 149 b = prev_b - q * b 150 prev_b = temp 151 return [x, prev_a, prev_b] 152 153 154 def multiply_add_plain_with_delta(phase, message, context): 155 """Add message (PlainText) into phase. 156 157 Args: 158 phase: phase is pre-computed carrier polynomial where we can add message data. 159 message (Plaintext): A plaintext representation of integer data to be encrypted. 160 context (Context): Context for extracting encryption parameters. 161 162 Returns: 163 A Ciphertext object with the encrypted result of encryption process. 164 """ 165 coeff_modulus = context.param.coeff_modulus 166 message = message.data 167 plain_coeff_count = len(message) 168 delta = context.coeff_div_plain_modulus 169 phase0, phase1 = phase.data # here phase = pk * u * e 170 171 # Coefficients of plain m multiplied by coeff_modulus q, divided by plain_modulus t, 172 # and rounded to the nearest integer (rounded up in case of a tie). Equivalent to 173 for i in range(plain_coeff_count): 174 for j in range(len(coeff_modulus)): 175 temp = round(delta[j] * message[i]) % coeff_modulus[j] 176 phase0[j][i] = (phase0[j][i] + temp) % coeff_modulus[j] 177 178 return CipherText([phase0, phase1]) # phase0 = pk0 * u * e + delta * m 179 [end of syft/frameworks/torch/he/fv/util/operations.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/syft/frameworks/torch/he/fv/util/operations.py b/syft/frameworks/torch/he/fv/util/operations.py --- a/syft/frameworks/torch/he/fv/util/operations.py +++ b/syft/frameworks/torch/he/fv/util/operations.py @@ -66,6 +66,15 @@ def poly_mul_mod(op1, op2, modulus): """return multiplication of two polynomials with all coefficients of polynomial %q(coefficient modulus) and result polynomial % t(polynomial modulus)""" + + # For non same size polynomails we have to shift the polynomials because numpy consider right + # side as lower order of polynomial and we consider right side as heigher order. + if len(op1) != len(op2): + if len(op1) > len(op2): + op2 = op2 + [0] * (len(op1) - len(op2)) + else: + op1 = op1 + [0] * (len(op2) - len(op1)) + poly_mod = np.array([1] + [0] * (len(op1) - 1) + [1]) result = ( poly.polydiv(
{"golden_diff": "diff --git a/syft/frameworks/torch/he/fv/util/operations.py b/syft/frameworks/torch/he/fv/util/operations.py\n--- a/syft/frameworks/torch/he/fv/util/operations.py\n+++ b/syft/frameworks/torch/he/fv/util/operations.py\n@@ -66,6 +66,15 @@\n def poly_mul_mod(op1, op2, modulus):\n \"\"\"return multiplication of two polynomials with all coefficients of\n polynomial %q(coefficient modulus) and result polynomial % t(polynomial modulus)\"\"\"\n+\n+ # For non same size polynomails we have to shift the polynomials because numpy consider right\n+ # side as lower order of polynomial and we consider right side as heigher order.\n+ if len(op1) != len(op2):\n+ if len(op1) > len(op2):\n+ op2 = op2 + [0] * (len(op1) - len(op2))\n+ else:\n+ op1 = op1 + [0] * (len(op2) - len(op1))\n+\n poly_mod = np.array([1] + [0] * (len(op1) - 1) + [1])\n result = (\n poly.polydiv(\n", "issue": "Update Polynomial operations for non same size polynomials.\n## Description\r\nFix poly_mul_mod operations for two non-same size polynomial arguments.\r\npolynomial operations are done with wrong orientation.\r\n\r\n## How to Reproduce\r\n1. Apply `poly_mul_mod` with two non-same size polynomials.\r\n2. The result is incorrect.\r\n\n", "before_files": [{"content": "import numpy as np\nfrom numpy.polynomial import polynomial as poly\n\nfrom syft.frameworks.torch.he.fv.ciphertext import CipherText\n\n\ndef multiply_mod(operand1, operand2, modulus):\n return (operand1 * operand2) % modulus\n\n\ndef negate_mod(operand, modulus):\n \"\"\"returns (-1 * operand) % modulus\"\"\"\n if modulus == 0:\n raise ValueError(\"Modulus cannot be 0\")\n if operand >= modulus:\n raise OverflowError(\"operand cannot be greater than modulus\")\n non_zero = operand != 0\n return (modulus - operand) & (-int(non_zero))\n\n\ndef exponentiate_mod(operand, exponent, modulus):\n if exponent == 0:\n return 1\n\n if exponent == 1:\n return operand\n\n # Perform binary exponentiation.\n power = operand\n product = 0\n intermediate = 1\n\n # Initially: power = operand and intermediate = 1, product is irrelevant.\n while True:\n if exponent & 1:\n product = multiply_mod(power, intermediate, modulus)\n product, intermediate = intermediate, product\n\n exponent >>= 1\n\n if exponent == 0:\n break\n\n product = multiply_mod(power, power, modulus)\n product, power = power, product\n\n return intermediate\n\n\ndef invert_mod(value, modulus):\n \"\"\"calculate inverse modulus for given value and modulus\"\"\"\n gcd_tuple = xgcd(value, modulus)\n\n if gcd_tuple[1] < 0:\n return gcd_tuple[1] + modulus\n else:\n return gcd_tuple[1]\n\n\ndef poly_add_mod(op1, op2, modulus):\n \"\"\"return addition of two polynomials with all coefficients of\n polynomial %q(coefficient modulus)\"\"\"\n return np.mod(np.polyadd(op1, op2), modulus).tolist()\n\n\ndef poly_mul_mod(op1, op2, modulus):\n \"\"\"return multiplication of two polynomials with all coefficients of\n polynomial %q(coefficient modulus) and result polynomial % t(polynomial modulus)\"\"\"\n poly_mod = np.array([1] + [0] * (len(op1) - 1) + [1])\n result = (\n poly.polydiv(\n poly.polymul(np.array(op1, dtype=\"object\"), np.array(op2, dtype=\"object\")) % modulus,\n poly_mod,\n )[1]\n % modulus\n ).tolist()\n return [round(x) for x in result]\n\n\ndef poly_negate_mod(op, modulus):\n \"\"\"returns negative of polynomial i.e (-1 * op)\"\"\"\n coeff_count = len(op)\n\n result = [0] * coeff_count\n for i in range(coeff_count):\n if modulus == 0:\n raise ValueError(\"Modulus cannot be 0\")\n if op[i] >= modulus:\n 
raise OverflowError(\"operand cannot be greater than modulus\")\n non_zero = op[i] != 0\n result[i] = (modulus - op[i]) & (-int(non_zero))\n return result\n\n\ndef get_significant_count(values):\n \"\"\"removes leading zero's from the list.\"\"\"\n count = len(values)\n i = count - 1\n while count and not values[i]:\n i -= 1\n count -= 1\n return count\n\n\ndef reverse_bit(value):\n \"\"\"calculate the value of the reverse binary representation of the given integer.\"\"\"\n result = 0\n while value:\n result = (result << 1) + (value & 1)\n value >>= 1\n return result\n\n\ndef multiply_many_except(operands, count, expt):\n result = 1\n for i in range(count):\n if i != expt:\n result *= operands[i]\n return result\n\n\ndef xgcd(x, y):\n \"\"\" Extended GCD\n\n Args:\n x (integer)\n y (integer)\n\n Returns:\n (gcd, x, y) where gcd is the greatest common divisor of a and b.\n The numbers x, y are such that gcd = ax + by.\n \"\"\"\n prev_a = 1\n a = 0\n prev_b = 0\n b = 1\n\n while y != 0:\n q = x // y\n temp = x % y\n x = y\n y = temp\n\n temp = a\n a = prev_a - q * a\n prev_a = temp\n\n temp = b\n b = prev_b - q * b\n prev_b = temp\n return [x, prev_a, prev_b]\n\n\ndef multiply_add_plain_with_delta(phase, message, context):\n \"\"\"Add message (PlainText) into phase.\n\n Args:\n phase: phase is pre-computed carrier polynomial where we can add message data.\n message (Plaintext): A plaintext representation of integer data to be encrypted.\n context (Context): Context for extracting encryption parameters.\n\n Returns:\n A Ciphertext object with the encrypted result of encryption process.\n \"\"\"\n coeff_modulus = context.param.coeff_modulus\n message = message.data\n plain_coeff_count = len(message)\n delta = context.coeff_div_plain_modulus\n phase0, phase1 = phase.data # here phase = pk * u * e\n\n # Coefficients of plain m multiplied by coeff_modulus q, divided by plain_modulus t,\n # and rounded to the nearest integer (rounded up in case of a tie). Equivalent to\n for i in range(plain_coeff_count):\n for j in range(len(coeff_modulus)):\n temp = round(delta[j] * message[i]) % coeff_modulus[j]\n phase0[j][i] = (phase0[j][i] + temp) % coeff_modulus[j]\n\n return CipherText([phase0, phase1]) # phase0 = pk0 * u * e + delta * m\n", "path": "syft/frameworks/torch/he/fv/util/operations.py"}]}
num_tokens_prompt: 2,333
num_tokens_diff: 276
problem_id: gh_patches_debug_29297
source: rasdani/github-patches
task_type: git_diff
in_source_id: gratipay__gratipay.com-2979
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Minor text injection vulnerability See https://gratipay.com/on/twitter/who%20you%20say%20you%20are...%20Visit%20gratipay.evilsite.com%20to%20verify%20your%20account%20and%20send%20me%20money.%20Maybe%20you%20don%27t%20have%20twitter/failure.html?action Pretty minor I admit but could potentially be exploited with encoding. We should validate twitter/github/facebook usernames. </issue> <code> [start of gratipay/elsewhere/__init__.py] 1 """This subpackage contains functionality for working with accounts elsewhere. 2 """ 3 from __future__ import division, print_function, unicode_literals 4 5 from collections import OrderedDict 6 from datetime import datetime 7 import hashlib 8 import json 9 import logging 10 from urllib import quote 11 import xml.etree.ElementTree as ET 12 13 from aspen import log, Response 14 from aspen.utils import to_age, utc 15 from requests_oauthlib import OAuth1Session, OAuth2Session 16 17 from gratipay.elsewhere._extractors import not_available 18 19 20 ACTIONS = {'opt-in', 'connect', 'lock', 'unlock'} 21 PLATFORMS = 'facebook google bitbucket bountysource github openstreetmap twitter venmo'.split() 22 23 24 class UnknownAccountElsewhere(Exception): pass 25 26 27 class PlatformRegistry(object): 28 """Registry of platforms we support connecting to Gratipay accounts. 29 """ 30 def __init__(self, platforms): 31 self.__dict__ = OrderedDict((p.name, p) for p in platforms) 32 33 def __contains__(self, platform): 34 return platform.name in self.__dict__ 35 36 def __iter__(self): 37 return iter(self.__dict__.values()) 38 39 40 class UserInfo(object): 41 """A simple container for a user's info. 42 43 Accessing a non-existing attribute returns `None`. 44 """ 45 46 def __init__(self, **kw): 47 self.__dict__.update(kw) 48 49 def __getattr__(self, key): 50 return self.__dict__.get(key, None) 51 52 def __setattr__(self, key, value): 53 if value is None: 54 self.__dict__.pop(key, None) 55 else: 56 self.__dict__[key] = value 57 58 59 class Platform(object): 60 61 allows_team_connect = False 62 63 # "x" stands for "extract" 64 x_user_info = not_available 65 x_user_id = not_available 66 x_user_name = not_available 67 x_display_name = not_available 68 x_email = not_available 69 x_gravatar_id = not_available 70 x_avatar_url = not_available 71 x_is_team = not_available 72 73 required_attrs = ( 'account_url' 74 , 'display_name' 75 , 'name' 76 ) 77 78 def __init__(self, api_key, api_secret, callback_url, api_url=None, auth_url=None): 79 self.api_key = api_key 80 self.api_secret = api_secret 81 self.callback_url = callback_url 82 if api_url: 83 self.api_url = api_url 84 if auth_url: 85 self.auth_url = auth_url 86 elif not getattr(self, 'auth_url', None): 87 self.auth_url = self.api_url 88 89 # Determine the appropriate response parser using `self.api_format` 90 api_format = getattr(self, 'api_format', None) 91 if api_format == 'json': 92 self.api_parser = lambda r: r.json() 93 elif api_format == 'xml': 94 self.api_parser = lambda r: ET.fromstring(r.content) 95 elif api_format: 96 raise ValueError('unknown API format: '+str(api_format)) 97 98 # Make sure the subclass was implemented properly. 
99 missing_attrs = [a for a in self.required_attrs if not hasattr(self, a)] 100 if missing_attrs: 101 msg = "The class %s is missing these required attributes: %s" 102 msg %= self.__class__.__name__, ', '.join(missing_attrs) 103 raise AttributeError(msg) 104 105 def api_get(self, path, sess=None, **kw): 106 """ 107 Given a `path` (e.g. /users/foo), this function sends a GET request to 108 the platform's API (e.g. https://api.github.com/users/foo). 109 110 The response is returned, after checking its status code and ratelimit 111 headers. 112 """ 113 if not sess: 114 sess = self.get_auth_session() 115 response = sess.get(self.api_url+path, **kw) 116 117 # Check status 118 status = response.status_code 119 if status == 404: 120 raise Response(404, response.text) 121 elif status != 200: 122 log('{} api responded with {}:\n{}'.format(self.name, status, response.text) 123 , level=logging.ERROR) 124 raise Response(500, '{} lookup failed with {}'.format(self.name, status)) 125 126 # Check ratelimit headers 127 prefix = getattr(self, 'ratelimit_headers_prefix', None) 128 if prefix: 129 limit = response.headers[prefix+'limit'] 130 remaining = response.headers[prefix+'remaining'] 131 reset = response.headers[prefix+'reset'] 132 try: 133 limit, remaining, reset = int(limit), int(remaining), int(reset) 134 except (TypeError, ValueError): 135 d = dict(limit=limit, remaining=remaining, reset=reset) 136 log('Got weird rate headers from %s: %s' % (self.name, d)) 137 else: 138 percent_remaining = remaining/limit 139 if percent_remaining < 0.5: 140 reset = to_age(datetime.fromtimestamp(reset, tz=utc)) 141 log_msg = ( 142 '{0} API: {1:.1%} of ratelimit has been consumed, ' 143 '{2} requests remaining, resets {3}.' 144 ).format(self.name, 1 - percent_remaining, remaining, reset) 145 log_lvl = logging.WARNING 146 if percent_remaining < 0.2: 147 log_lvl = logging.ERROR 148 elif percent_remaining < 0.05: 149 log_lvl = logging.CRITICAL 150 log(log_msg, log_lvl) 151 152 return response 153 154 def extract_user_info(self, info): 155 """ 156 Given a user_info object of variable type (depending on the platform), 157 extract the relevant information by calling the platform's extractors 158 (`x_user_name`, `x_user_id`, etc). 159 160 Returns a `UserInfo`. The `user_id` attribute is guaranteed to have a 161 unique non-empty value. 162 """ 163 r = UserInfo(platform=self.name) 164 info = self.x_user_info(r, info, info) 165 r.user_name = self.x_user_name(r, info, None) 166 if self.x_user_id.__func__ is not_available: 167 r.user_id = r.user_name 168 else: 169 r.user_id = self.x_user_id(r, info) 170 assert r.user_id is not None 171 r.user_id = unicode(r.user_id) 172 assert len(r.user_id) > 0 173 r.display_name = self.x_display_name(r, info, None) 174 r.email = self.x_email(r, info, None) 175 r.avatar_url = self.x_avatar_url(r, info, None) 176 if not r.avatar_url: 177 gravatar_id = self.x_gravatar_id(r, info, None) 178 if r.email and not gravatar_id: 179 gravatar_id = hashlib.md5(r.email.strip().lower()).hexdigest() 180 if gravatar_id: 181 r.avatar_url = 'https://secure.gravatar.com/avatar/'+gravatar_id 182 r.is_team = self.x_is_team(r, info, False) 183 r.extra_info = info 184 return r 185 186 def get_team_members(self, team_name, page_url=None): 187 """Given a team_name on the platform, return the team's membership list 188 from the API. 
189 """ 190 default_url = self.api_team_members_path.format(user_name=quote(team_name)) 191 r = self.api_get(page_url or default_url) 192 members, count, pages_urls = self.api_paginator(r, self.api_parser(r)) 193 members = [self.extract_user_info(m) for m in members] 194 return members, count, pages_urls 195 196 def get_user_info(self, user_name, sess=None): 197 """Given a user_name on the platform, get the user's info from the API. 198 """ 199 try: 200 path = self.api_user_info_path.format(user_name=quote(user_name)) 201 except KeyError: 202 raise Response(404) 203 info = self.api_parser(self.api_get(path, sess=sess)) 204 return self.extract_user_info(info) 205 206 def get_user_self_info(self, sess): 207 """Get the authenticated user's info from the API. 208 """ 209 r = self.api_get(self.api_user_self_info_path, sess=sess) 210 info = self.extract_user_info(self.api_parser(r)) 211 token = getattr(sess, 'token', None) 212 if token: 213 info.token = json.dumps(token) 214 return info 215 216 217 class PlatformOAuth1(Platform): 218 219 request_token_path = '/oauth/request_token' 220 authorize_path = '/oauth/authorize' 221 access_token_path = '/oauth/access_token' 222 223 def get_auth_session(self, token=None): 224 args = () 225 if token: 226 args = (token['token'], token['token_secret']) 227 return OAuth1Session(self.api_key, self.api_secret, *args, 228 callback_uri=self.callback_url) 229 230 def get_auth_url(self, **kw): 231 sess = self.get_auth_session() 232 r = sess.fetch_request_token(self.auth_url+self.request_token_path) 233 url = sess.authorization_url(self.auth_url+self.authorize_path) 234 return url, r['oauth_token'], r['oauth_token_secret'] 235 236 def get_query_id(self, querystring): 237 return querystring['oauth_token'] 238 239 def handle_auth_callback(self, url, token, token_secret): 240 sess = self.get_auth_session(dict(token=token, token_secret=token_secret)) 241 sess.parse_authorization_response(url) 242 r = sess.fetch_access_token(self.auth_url+self.access_token_path) 243 sess.token = dict(token=r['oauth_token'], 244 token_secret=r['oauth_token_secret']) 245 return sess 246 247 248 class PlatformOAuth2(Platform): 249 250 oauth_default_scope = None 251 oauth_email_scope = None 252 oauth_payment_scope = None 253 254 def get_auth_session(self, state=None, token=None, token_updater=None): 255 return OAuth2Session(self.api_key, state=state, token=token, 256 token_updater=token_updater, 257 redirect_uri=self.callback_url, 258 scope=self.oauth_default_scope) 259 260 def get_auth_url(self, **kw): 261 sess = self.get_auth_session() 262 url, state = sess.authorization_url(self.auth_url) 263 return url, state, '' 264 265 def get_query_id(self, querystring): 266 return querystring['state'] 267 268 def handle_auth_callback(self, url, state, unused_arg): 269 sess = self.get_auth_session(state=state) 270 sess.fetch_token(self.access_token_url, 271 client_secret=self.api_secret, 272 authorization_response=url) 273 return sess 274 [end of gratipay/elsewhere/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gratipay/elsewhere/__init__.py b/gratipay/elsewhere/__init__.py --- a/gratipay/elsewhere/__init__.py +++ b/gratipay/elsewhere/__init__.py @@ -114,7 +114,14 @@ sess = self.get_auth_session() response = sess.get(self.api_url+path, **kw) - # Check status + self.check_api_response_status(response) + self.check_ratelimit_headers(response) + + return response + + def check_api_response_status(self, response): + """Pass through any 404, convert any other non-200 into a 500. + """ status = response.status_code if status == 404: raise Response(404, response.text) @@ -123,7 +130,9 @@ , level=logging.ERROR) raise Response(500, '{} lookup failed with {}'.format(self.name, status)) - # Check ratelimit headers + def check_ratelimit_headers(self, response): + """Emit log messages if we're running out of ratelimit. + """ prefix = getattr(self, 'ratelimit_headers_prefix', None) if prefix: limit = response.headers[prefix+'limit'] @@ -149,8 +158,6 @@ log_lvl = logging.CRITICAL log(log_msg, log_lvl) - return response - def extract_user_info(self, info): """ Given a user_info object of variable type (depending on the platform),
{"golden_diff": "diff --git a/gratipay/elsewhere/__init__.py b/gratipay/elsewhere/__init__.py\n--- a/gratipay/elsewhere/__init__.py\n+++ b/gratipay/elsewhere/__init__.py\n@@ -114,7 +114,14 @@\n sess = self.get_auth_session()\n response = sess.get(self.api_url+path, **kw)\n \n- # Check status\n+ self.check_api_response_status(response)\n+ self.check_ratelimit_headers(response)\n+\n+ return response\n+\n+ def check_api_response_status(self, response):\n+ \"\"\"Pass through any 404, convert any other non-200 into a 500.\n+ \"\"\"\n status = response.status_code\n if status == 404:\n raise Response(404, response.text)\n@@ -123,7 +130,9 @@\n , level=logging.ERROR)\n raise Response(500, '{} lookup failed with {}'.format(self.name, status))\n \n- # Check ratelimit headers\n+ def check_ratelimit_headers(self, response):\n+ \"\"\"Emit log messages if we're running out of ratelimit.\n+ \"\"\"\n prefix = getattr(self, 'ratelimit_headers_prefix', None)\n if prefix:\n limit = response.headers[prefix+'limit']\n@@ -149,8 +158,6 @@\n log_lvl = logging.CRITICAL\n log(log_msg, log_lvl)\n \n- return response\n-\n def extract_user_info(self, info):\n \"\"\"\n Given a user_info object of variable type (depending on the platform),\n", "issue": "Minor text injection vulnerability\nSee https://gratipay.com/on/twitter/who%20you%20say%20you%20are...%20Visit%20gratipay.evilsite.com%20to%20verify%20your%20account%20and%20send%20me%20money.%20Maybe%20you%20don%27t%20have%20twitter/failure.html?action\n\nPretty minor I admit but could potentially be exploited with encoding. We should validate twitter/github/facebook usernames.\n\n", "before_files": [{"content": "\"\"\"This subpackage contains functionality for working with accounts elsewhere.\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom datetime import datetime\nimport hashlib\nimport json\nimport logging\nfrom urllib import quote\nimport xml.etree.ElementTree as ET\n\nfrom aspen import log, Response\nfrom aspen.utils import to_age, utc\nfrom requests_oauthlib import OAuth1Session, OAuth2Session\n\nfrom gratipay.elsewhere._extractors import not_available\n\n\nACTIONS = {'opt-in', 'connect', 'lock', 'unlock'}\nPLATFORMS = 'facebook google bitbucket bountysource github openstreetmap twitter venmo'.split()\n\n\nclass UnknownAccountElsewhere(Exception): pass\n\n\nclass PlatformRegistry(object):\n \"\"\"Registry of platforms we support connecting to Gratipay accounts.\n \"\"\"\n def __init__(self, platforms):\n self.__dict__ = OrderedDict((p.name, p) for p in platforms)\n\n def __contains__(self, platform):\n return platform.name in self.__dict__\n\n def __iter__(self):\n return iter(self.__dict__.values())\n\n\nclass UserInfo(object):\n \"\"\"A simple container for a user's info.\n\n Accessing a non-existing attribute returns `None`.\n \"\"\"\n\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n def __getattr__(self, key):\n return self.__dict__.get(key, None)\n\n def __setattr__(self, key, value):\n if value is None:\n self.__dict__.pop(key, None)\n else:\n self.__dict__[key] = value\n\n\nclass Platform(object):\n\n allows_team_connect = False\n\n # \"x\" stands for \"extract\"\n x_user_info = not_available\n x_user_id = not_available\n x_user_name = not_available\n x_display_name = not_available\n x_email = not_available\n x_gravatar_id = not_available\n x_avatar_url = not_available\n x_is_team = not_available\n\n required_attrs = ( 'account_url'\n , 'display_name'\n , 'name'\n )\n\n def 
__init__(self, api_key, api_secret, callback_url, api_url=None, auth_url=None):\n self.api_key = api_key\n self.api_secret = api_secret\n self.callback_url = callback_url\n if api_url:\n self.api_url = api_url\n if auth_url:\n self.auth_url = auth_url\n elif not getattr(self, 'auth_url', None):\n self.auth_url = self.api_url\n\n # Determine the appropriate response parser using `self.api_format`\n api_format = getattr(self, 'api_format', None)\n if api_format == 'json':\n self.api_parser = lambda r: r.json()\n elif api_format == 'xml':\n self.api_parser = lambda r: ET.fromstring(r.content)\n elif api_format:\n raise ValueError('unknown API format: '+str(api_format))\n\n # Make sure the subclass was implemented properly.\n missing_attrs = [a for a in self.required_attrs if not hasattr(self, a)]\n if missing_attrs:\n msg = \"The class %s is missing these required attributes: %s\"\n msg %= self.__class__.__name__, ', '.join(missing_attrs)\n raise AttributeError(msg)\n\n def api_get(self, path, sess=None, **kw):\n \"\"\"\n Given a `path` (e.g. /users/foo), this function sends a GET request to\n the platform's API (e.g. https://api.github.com/users/foo).\n\n The response is returned, after checking its status code and ratelimit\n headers.\n \"\"\"\n if not sess:\n sess = self.get_auth_session()\n response = sess.get(self.api_url+path, **kw)\n\n # Check status\n status = response.status_code\n if status == 404:\n raise Response(404, response.text)\n elif status != 200:\n log('{} api responded with {}:\\n{}'.format(self.name, status, response.text)\n , level=logging.ERROR)\n raise Response(500, '{} lookup failed with {}'.format(self.name, status))\n\n # Check ratelimit headers\n prefix = getattr(self, 'ratelimit_headers_prefix', None)\n if prefix:\n limit = response.headers[prefix+'limit']\n remaining = response.headers[prefix+'remaining']\n reset = response.headers[prefix+'reset']\n try:\n limit, remaining, reset = int(limit), int(remaining), int(reset)\n except (TypeError, ValueError):\n d = dict(limit=limit, remaining=remaining, reset=reset)\n log('Got weird rate headers from %s: %s' % (self.name, d))\n else:\n percent_remaining = remaining/limit\n if percent_remaining < 0.5:\n reset = to_age(datetime.fromtimestamp(reset, tz=utc))\n log_msg = (\n '{0} API: {1:.1%} of ratelimit has been consumed, '\n '{2} requests remaining, resets {3}.'\n ).format(self.name, 1 - percent_remaining, remaining, reset)\n log_lvl = logging.WARNING\n if percent_remaining < 0.2:\n log_lvl = logging.ERROR\n elif percent_remaining < 0.05:\n log_lvl = logging.CRITICAL\n log(log_msg, log_lvl)\n\n return response\n\n def extract_user_info(self, info):\n \"\"\"\n Given a user_info object of variable type (depending on the platform),\n extract the relevant information by calling the platform's extractors\n (`x_user_name`, `x_user_id`, etc).\n\n Returns a `UserInfo`. 
The `user_id` attribute is guaranteed to have a\n unique non-empty value.\n \"\"\"\n r = UserInfo(platform=self.name)\n info = self.x_user_info(r, info, info)\n r.user_name = self.x_user_name(r, info, None)\n if self.x_user_id.__func__ is not_available:\n r.user_id = r.user_name\n else:\n r.user_id = self.x_user_id(r, info)\n assert r.user_id is not None\n r.user_id = unicode(r.user_id)\n assert len(r.user_id) > 0\n r.display_name = self.x_display_name(r, info, None)\n r.email = self.x_email(r, info, None)\n r.avatar_url = self.x_avatar_url(r, info, None)\n if not r.avatar_url:\n gravatar_id = self.x_gravatar_id(r, info, None)\n if r.email and not gravatar_id:\n gravatar_id = hashlib.md5(r.email.strip().lower()).hexdigest()\n if gravatar_id:\n r.avatar_url = 'https://secure.gravatar.com/avatar/'+gravatar_id\n r.is_team = self.x_is_team(r, info, False)\n r.extra_info = info\n return r\n\n def get_team_members(self, team_name, page_url=None):\n \"\"\"Given a team_name on the platform, return the team's membership list\n from the API.\n \"\"\"\n default_url = self.api_team_members_path.format(user_name=quote(team_name))\n r = self.api_get(page_url or default_url)\n members, count, pages_urls = self.api_paginator(r, self.api_parser(r))\n members = [self.extract_user_info(m) for m in members]\n return members, count, pages_urls\n\n def get_user_info(self, user_name, sess=None):\n \"\"\"Given a user_name on the platform, get the user's info from the API.\n \"\"\"\n try:\n path = self.api_user_info_path.format(user_name=quote(user_name))\n except KeyError:\n raise Response(404)\n info = self.api_parser(self.api_get(path, sess=sess))\n return self.extract_user_info(info)\n\n def get_user_self_info(self, sess):\n \"\"\"Get the authenticated user's info from the API.\n \"\"\"\n r = self.api_get(self.api_user_self_info_path, sess=sess)\n info = self.extract_user_info(self.api_parser(r))\n token = getattr(sess, 'token', None)\n if token:\n info.token = json.dumps(token)\n return info\n\n\nclass PlatformOAuth1(Platform):\n\n request_token_path = '/oauth/request_token'\n authorize_path = '/oauth/authorize'\n access_token_path = '/oauth/access_token'\n\n def get_auth_session(self, token=None):\n args = ()\n if token:\n args = (token['token'], token['token_secret'])\n return OAuth1Session(self.api_key, self.api_secret, *args,\n callback_uri=self.callback_url)\n\n def get_auth_url(self, **kw):\n sess = self.get_auth_session()\n r = sess.fetch_request_token(self.auth_url+self.request_token_path)\n url = sess.authorization_url(self.auth_url+self.authorize_path)\n return url, r['oauth_token'], r['oauth_token_secret']\n\n def get_query_id(self, querystring):\n return querystring['oauth_token']\n\n def handle_auth_callback(self, url, token, token_secret):\n sess = self.get_auth_session(dict(token=token, token_secret=token_secret))\n sess.parse_authorization_response(url)\n r = sess.fetch_access_token(self.auth_url+self.access_token_path)\n sess.token = dict(token=r['oauth_token'],\n token_secret=r['oauth_token_secret'])\n return sess\n\n\nclass PlatformOAuth2(Platform):\n\n oauth_default_scope = None\n oauth_email_scope = None\n oauth_payment_scope = None\n\n def get_auth_session(self, state=None, token=None, token_updater=None):\n return OAuth2Session(self.api_key, state=state, token=token,\n token_updater=token_updater,\n redirect_uri=self.callback_url,\n scope=self.oauth_default_scope)\n\n def get_auth_url(self, **kw):\n sess = self.get_auth_session()\n url, state = sess.authorization_url(self.auth_url)\n 
return url, state, ''\n\n def get_query_id(self, querystring):\n return querystring['state']\n\n def handle_auth_callback(self, url, state, unused_arg):\n sess = self.get_auth_session(state=state)\n sess.fetch_token(self.access_token_url,\n client_secret=self.api_secret,\n authorization_response=url)\n return sess\n", "path": "gratipay/elsewhere/__init__.py"}]}
num_tokens_prompt: 3,626
num_tokens_diff: 362
problem_id: gh_patches_debug_11704
source: rasdani/github-patches
task_type: git_diff
in_source_id: pymedusa__Medusa-4169
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> qbittorrent: Unable to set the pause for Torrent (new qBit 4.1.0) ### Before submitting your issue: Enable debug logging in Medusa settings, reproduce the error (be sure to disable after the bug is fixed) **Branch/Commit:** 77b2091 **OS:** Linux Mint 18.2 Cinnamon **What you did:** I updated to qBittorrent 4.1.0 earlier today, prior to that I was using Qbittorrent 4.0.4, so I figure maybe this is something with Medusa + qBittorrent 4.1.0....I didn't do any manual pause or anything so no idea what it would be pausing or why. I do *not* have this checked: Start torrent paused add .torrent to client but do not start downloading In the Search Settings -> Torrent Search config in Medusa, in case anybody is wondering. **What happened:** Some kind of red error about pausing a torrent with Qbittorrent **What you expected:** No error **Logs:** ``` 2018-05-06 17:59:02 INFO SEARCHQUEUE-DAILY-SEARCH :: [77b2091] Prowl notifications sent. 2018-05-06 17:59:01 ERROR SEARCHQUEUE-DAILY-SEARCH :: [77b2091] qbittorrent: Unable to set the pause for Torrent 2018-05-06 17:59:01 INFO SEARCHQUEUE-DAILY-SEARCH :: [77b2091] qbittorrent: Bad Request 2018-05-06 17:59:00 INFO SEARCHQUEUE-DAILY-SEARCH :: [77b2091] Downloading (censored) with 13 seeders and 2 leechers and size (censored) MB from (censored) ``` </issue> <code> [start of medusa/clients/torrent/qbittorrent_client.py] 1 # coding=utf-8 2 3 """qBittorrent Client.""" 4 5 from __future__ import unicode_literals 6 7 import logging 8 9 from medusa import app 10 from medusa.clients.torrent.generic import GenericClient 11 from medusa.logger.adapters.style import BraceAdapter 12 13 from requests.auth import HTTPDigestAuth 14 15 log = BraceAdapter(logging.getLogger(__name__)) 16 log.logger.addHandler(logging.NullHandler()) 17 18 19 class QBittorrentAPI(GenericClient): 20 """qBittorrent API class.""" 21 22 def __init__(self, host=None, username=None, password=None): 23 """Constructor. 
24 25 :param host: 26 :type host: string 27 :param username: 28 :type username: string 29 :param password: 30 :type password: string 31 """ 32 super(QBittorrentAPI, self).__init__('qbittorrent', host, username, password) 33 self.url = self.host 34 self.session.auth = HTTPDigestAuth(self.username, self.password) 35 36 @property 37 def api(self): 38 """Get API version.""" 39 try: 40 self.url = '{host}version/api'.format(host=self.host) 41 version = int(self.session.get(self.url, verify=app.TORRENT_VERIFY_CERT).content) 42 except Exception: 43 version = 1 44 return version 45 46 def _get_auth(self): 47 48 if self.api > 1: 49 self.url = '{host}login'.format(host=self.host) 50 data = { 51 'username': self.username, 52 'password': self.password, 53 } 54 try: 55 self.response = self.session.post(self.url, data=data) 56 except Exception: 57 return None 58 59 else: 60 try: 61 self.response = self.session.get(self.host, verify=app.TORRENT_VERIFY_CERT) 62 self.auth = self.response.content 63 except Exception: 64 return None 65 66 self.session.cookies = self.response.cookies 67 self.auth = self.response.content 68 69 return self.auth if not self.response.status_code == 404 else None 70 71 def _add_torrent_uri(self, result): 72 73 self.url = '{host}command/download'.format(host=self.host) 74 data = { 75 'urls': result.url, 76 } 77 return self._request(method='post', data=data, cookies=self.session.cookies) 78 79 def _add_torrent_file(self, result): 80 81 self.url = '{host}command/upload'.format(host=self.host) 82 files = { 83 'torrents': ( 84 '{result}.torrent'.format(result=result.name), 85 result.content, 86 ), 87 } 88 return self._request(method='post', files=files, cookies=self.session.cookies) 89 90 def _set_torrent_label(self, result): 91 92 label = app.TORRENT_LABEL_ANIME if result.series.is_anime else app.TORRENT_LABEL 93 94 if self.api > 6 and label: 95 label_key = 'Category' if self.api >= 10 else 'Label' 96 self.url = '{host}command/set{key}'.format( 97 host=self.host, 98 key=label_key, 99 ) 100 data = { 101 'hashes': result.hash.lower(), 102 label_key.lower(): label.replace(' ', '_'), 103 } 104 return self._request(method='post', data=data, cookies=self.session.cookies) 105 return True 106 107 def _set_torrent_priority(self, result): 108 109 self.url = '{host}command/{method}Prio'.format(host=self.host, 110 method='increase' if result.priority == 1 else 'decrease') 111 data = { 112 'hashes': result.hash.lower(), 113 } 114 ok = self._request(method='post', data=data, cookies=self.session.cookies) 115 116 if self.response.status_code == 403: 117 log.info('{name}: Unable to set torrent priority because torrent queueing' 118 ' is disabled in {name} settings.', {'name': self.name}) 119 ok = True 120 121 return ok 122 123 def _set_torrent_pause(self, result): 124 self.url = '{host}command/{state}'.format(host=self.host, 125 state='pause' if app.TORRENT_PAUSED else 'resume') 126 data = { 127 'hash': result.hash, 128 } 129 return self._request(method='post', data=data, cookies=self.session.cookies) 130 131 def remove_torrent(self, info_hash): 132 """Remove torrent from client using given info_hash. 
133 134 :param info_hash: 135 :type info_hash: string 136 :return 137 :rtype: bool 138 """ 139 self.url = '{host}command/deletePerm'.format(host=self.host) 140 data = { 141 'hashes': info_hash.lower(), 142 } 143 144 return self._request(method='post', data=data, cookies=self.session.cookies) 145 146 147 api = QBittorrentAPI 148 [end of medusa/clients/torrent/qbittorrent_client.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/medusa/clients/torrent/qbittorrent_client.py b/medusa/clients/torrent/qbittorrent_client.py --- a/medusa/clients/torrent/qbittorrent_client.py +++ b/medusa/clients/torrent/qbittorrent_client.py @@ -123,8 +123,9 @@ def _set_torrent_pause(self, result): self.url = '{host}command/{state}'.format(host=self.host, state='pause' if app.TORRENT_PAUSED else 'resume') + hashes_key = 'hashes' if self.api >= 18 else 'hash' data = { - 'hash': result.hash, + hashes_key: result.hash.lower(), } return self._request(method='post', data=data, cookies=self.session.cookies)
{"golden_diff": "diff --git a/medusa/clients/torrent/qbittorrent_client.py b/medusa/clients/torrent/qbittorrent_client.py\n--- a/medusa/clients/torrent/qbittorrent_client.py\n+++ b/medusa/clients/torrent/qbittorrent_client.py\n@@ -123,8 +123,9 @@\n def _set_torrent_pause(self, result):\n self.url = '{host}command/{state}'.format(host=self.host,\n state='pause' if app.TORRENT_PAUSED else 'resume')\n+ hashes_key = 'hashes' if self.api >= 18 else 'hash'\n data = {\n- 'hash': result.hash,\n+ hashes_key: result.hash.lower(),\n }\n return self._request(method='post', data=data, cookies=self.session.cookies)\n", "issue": "qbittorrent: Unable to set the pause for Torrent (new qBit 4.1.0)\n### Before submitting your issue:\r\n\r\nEnable debug logging in Medusa settings, reproduce the error (be sure to disable after the bug is fixed)\r\n\r\n**Branch/Commit:**\r\n77b2091\r\n**OS:**\r\nLinux Mint 18.2 Cinnamon\r\n**What you did:**\r\nI updated to qBittorrent 4.1.0 earlier today, prior to that I was using Qbittorrent 4.0.4, so I figure maybe this is something with Medusa + qBittorrent 4.1.0....I didn't do any manual pause or anything so no idea what it would be pausing or why.\r\nI do *not* have this checked:\r\nStart torrent paused\r\nadd .torrent to client but do not start downloading\r\nIn the Search Settings -> Torrent Search config in Medusa, in case anybody is wondering.\r\n**What happened:**\r\nSome kind of red error about pausing a torrent with Qbittorrent\r\n**What you expected:**\r\nNo error\r\n**Logs:**\r\n```\r\n2018-05-06 17:59:02 INFO SEARCHQUEUE-DAILY-SEARCH :: [77b2091] Prowl notifications sent.\r\n2018-05-06 17:59:01 ERROR SEARCHQUEUE-DAILY-SEARCH :: [77b2091] qbittorrent: Unable to set the pause for Torrent\r\n2018-05-06 17:59:01 INFO SEARCHQUEUE-DAILY-SEARCH :: [77b2091] qbittorrent: Bad Request\r\n2018-05-06 17:59:00 INFO SEARCHQUEUE-DAILY-SEARCH :: [77b2091] Downloading (censored) with 13 seeders and 2 leechers and size (censored) MB from (censored)\r\n```\r\n\n", "before_files": [{"content": "# coding=utf-8\n\n\"\"\"qBittorrent Client.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nfrom medusa import app\nfrom medusa.clients.torrent.generic import GenericClient\nfrom medusa.logger.adapters.style import BraceAdapter\n\nfrom requests.auth import HTTPDigestAuth\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass QBittorrentAPI(GenericClient):\n \"\"\"qBittorrent API class.\"\"\"\n\n def __init__(self, host=None, username=None, password=None):\n \"\"\"Constructor.\n\n :param host:\n :type host: string\n :param username:\n :type username: string\n :param password:\n :type password: string\n \"\"\"\n super(QBittorrentAPI, self).__init__('qbittorrent', host, username, password)\n self.url = self.host\n self.session.auth = HTTPDigestAuth(self.username, self.password)\n\n @property\n def api(self):\n \"\"\"Get API version.\"\"\"\n try:\n self.url = '{host}version/api'.format(host=self.host)\n version = int(self.session.get(self.url, verify=app.TORRENT_VERIFY_CERT).content)\n except Exception:\n version = 1\n return version\n\n def _get_auth(self):\n\n if self.api > 1:\n self.url = '{host}login'.format(host=self.host)\n data = {\n 'username': self.username,\n 'password': self.password,\n }\n try:\n self.response = self.session.post(self.url, data=data)\n except Exception:\n return None\n\n else:\n try:\n self.response = self.session.get(self.host, verify=app.TORRENT_VERIFY_CERT)\n self.auth = 
self.response.content\n except Exception:\n return None\n\n self.session.cookies = self.response.cookies\n self.auth = self.response.content\n\n return self.auth if not self.response.status_code == 404 else None\n\n def _add_torrent_uri(self, result):\n\n self.url = '{host}command/download'.format(host=self.host)\n data = {\n 'urls': result.url,\n }\n return self._request(method='post', data=data, cookies=self.session.cookies)\n\n def _add_torrent_file(self, result):\n\n self.url = '{host}command/upload'.format(host=self.host)\n files = {\n 'torrents': (\n '{result}.torrent'.format(result=result.name),\n result.content,\n ),\n }\n return self._request(method='post', files=files, cookies=self.session.cookies)\n\n def _set_torrent_label(self, result):\n\n label = app.TORRENT_LABEL_ANIME if result.series.is_anime else app.TORRENT_LABEL\n\n if self.api > 6 and label:\n label_key = 'Category' if self.api >= 10 else 'Label'\n self.url = '{host}command/set{key}'.format(\n host=self.host,\n key=label_key,\n )\n data = {\n 'hashes': result.hash.lower(),\n label_key.lower(): label.replace(' ', '_'),\n }\n return self._request(method='post', data=data, cookies=self.session.cookies)\n return True\n\n def _set_torrent_priority(self, result):\n\n self.url = '{host}command/{method}Prio'.format(host=self.host,\n method='increase' if result.priority == 1 else 'decrease')\n data = {\n 'hashes': result.hash.lower(),\n }\n ok = self._request(method='post', data=data, cookies=self.session.cookies)\n\n if self.response.status_code == 403:\n log.info('{name}: Unable to set torrent priority because torrent queueing'\n ' is disabled in {name} settings.', {'name': self.name})\n ok = True\n\n return ok\n\n def _set_torrent_pause(self, result):\n self.url = '{host}command/{state}'.format(host=self.host,\n state='pause' if app.TORRENT_PAUSED else 'resume')\n data = {\n 'hash': result.hash,\n }\n return self._request(method='post', data=data, cookies=self.session.cookies)\n\n def remove_torrent(self, info_hash):\n \"\"\"Remove torrent from client using given info_hash.\n\n :param info_hash:\n :type info_hash: string\n :return\n :rtype: bool\n \"\"\"\n self.url = '{host}command/deletePerm'.format(host=self.host)\n data = {\n 'hashes': info_hash.lower(),\n }\n\n return self._request(method='post', data=data, cookies=self.session.cookies)\n\n\napi = QBittorrentAPI\n", "path": "medusa/clients/torrent/qbittorrent_client.py"}]}
2,349
182
gh_patches_debug_42000
rasdani/github-patches
git_diff
ansible__ansible-modules-core-4289
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> os_server_actions nova rebuild server ##### ISSUE TYPE Feature Idea ##### COMPONENT NAME os_server module ##### ANSIBLE VERSION N/A ##### SUMMARY Could it be possible to implement the nova rebuild server action in the os_server_actions module? We use openstack in our CI pipeline, and this is far more cheaper then booting a new server every time we test. </issue> <code> [start of cloud/openstack/os_server_actions.py] 1 #!/usr/bin/python 2 # coding: utf-8 -*- 3 4 # Copyright (c) 2015, Jesse Keating <[email protected]> 5 # 6 # This module is free software: you can redistribute it and/or modify 7 # it under the terms of the GNU General Public License as published by 8 # the Free Software Foundation, either version 3 of the License, or 9 # (at your option) any later version. 10 # 11 # This software is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU General Public License for more details. 15 # 16 # You should have received a copy of the GNU General Public License 17 # along with this software. If not, see <http://www.gnu.org/licenses/>. 18 19 20 try: 21 import shade 22 from shade import meta 23 HAS_SHADE = True 24 except ImportError: 25 HAS_SHADE = False 26 27 28 DOCUMENTATION = ''' 29 --- 30 module: os_server_actions 31 short_description: Perform actions on Compute Instances from OpenStack 32 extends_documentation_fragment: openstack 33 version_added: "2.0" 34 author: "Jesse Keating (@j2sol)" 35 description: 36 - Perform server actions on an existing compute instance from OpenStack. 37 This module does not return any data other than changed true/false. 38 options: 39 server: 40 description: 41 - Name or ID of the instance 42 required: true 43 wait: 44 description: 45 - If the module should wait for the instance action to be performed. 46 required: false 47 default: 'yes' 48 timeout: 49 description: 50 - The amount of time the module should wait for the instance to perform 51 the requested action. 52 required: false 53 default: 180 54 action: 55 description: 56 - Perform the given action. The lock and unlock actions always return 57 changed as the servers API does not provide lock status. 
58 choices: [stop, start, pause, unpause, lock, unlock, suspend, resume] 59 default: present 60 requirements: 61 - "python >= 2.6" 62 - "shade" 63 ''' 64 65 EXAMPLES = ''' 66 # Pauses a compute instance 67 - os_server_actions: 68 action: pause 69 auth: 70 auth_url: https://mycloud.openstack.blueboxgrid.com:5001/v2.0 71 username: admin 72 password: admin 73 project_name: admin 74 server: vm1 75 timeout: 200 76 ''' 77 78 _action_map = {'stop': 'SHUTOFF', 79 'start': 'ACTIVE', 80 'pause': 'PAUSED', 81 'unpause': 'ACTIVE', 82 'lock': 'ACTIVE', # API doesn't show lock/unlock status 83 'unlock': 'ACTIVE', 84 'suspend': 'SUSPENDED', 85 'resume': 'ACTIVE',} 86 87 _admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock'] 88 89 def _wait(timeout, cloud, server, action): 90 """Wait for the server to reach the desired state for the given action.""" 91 92 for count in shade._utils._iterate_timeout( 93 timeout, 94 "Timeout waiting for server to complete %s" % action): 95 try: 96 server = cloud.get_server(server.id) 97 except Exception: 98 continue 99 100 if server.status == _action_map[action]: 101 return 102 103 if server.status == 'ERROR': 104 module.fail_json(msg="Server reached ERROR state while attempting to %s" % action) 105 106 def _system_state_change(action, status): 107 """Check if system state would change.""" 108 if status == _action_map[action]: 109 return False 110 return True 111 112 def main(): 113 argument_spec = openstack_full_argument_spec( 114 server=dict(required=True), 115 action=dict(required=True, choices=['stop', 'start', 'pause', 'unpause', 116 'lock', 'unlock', 'suspend', 'resume']), 117 ) 118 119 module_kwargs = openstack_module_kwargs() 120 module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) 121 122 if not HAS_SHADE: 123 module.fail_json(msg='shade is required for this module') 124 125 action = module.params['action'] 126 wait = module.params['wait'] 127 timeout = module.params['timeout'] 128 129 try: 130 if action in _admin_actions: 131 cloud = shade.operator_cloud(**module.params) 132 else: 133 cloud = shade.openstack_cloud(**module.params) 134 server = cloud.get_server(module.params['server']) 135 if not server: 136 module.fail_json(msg='Could not find server %s' % server) 137 status = server.status 138 139 if module.check_mode: 140 module.exit_json(changed=_system_state_change(action, status)) 141 142 if action == 'stop': 143 if not _system_state_change(action, status): 144 module.exit_json(changed=False) 145 146 cloud.nova_client.servers.stop(server=server.id) 147 if wait: 148 _wait(timeout, cloud, server, action) 149 module.exit_json(changed=True) 150 151 if action == 'start': 152 if not _system_state_change(action, status): 153 module.exit_json(changed=False) 154 155 cloud.nova_client.servers.start(server=server.id) 156 if wait: 157 _wait(timeout, cloud, server, action) 158 module.exit_json(changed=True) 159 160 if action == 'pause': 161 if not _system_state_change(action, status): 162 module.exit_json(changed=False) 163 164 cloud.nova_client.servers.pause(server=server.id) 165 if wait: 166 _wait(timeout, cloud, server, action) 167 module.exit_json(changed=True) 168 169 elif action == 'unpause': 170 if not _system_state_change(action, status): 171 module.exit_json(changed=False) 172 173 cloud.nova_client.servers.unpause(server=server.id) 174 if wait: 175 _wait(timeout, cloud, server, action) 176 module.exit_json(changed=True) 177 178 elif action == 'lock': 179 # lock doesn't set a state, just do it 180 
cloud.nova_client.servers.lock(server=server.id) 181 module.exit_json(changed=True) 182 183 elif action == 'unlock': 184 # unlock doesn't set a state, just do it 185 cloud.nova_client.servers.unlock(server=server.id) 186 module.exit_json(changed=True) 187 188 elif action == 'suspend': 189 if not _system_state_change(action, status): 190 module.exit_json(changed=False) 191 192 cloud.nova_client.servers.suspend(server=server.id) 193 if wait: 194 _wait(timeout, cloud, server, action) 195 module.exit_json(changed=True) 196 197 elif action == 'resume': 198 if not _system_state_change(action, status): 199 module.exit_json(changed=False) 200 201 cloud.nova_client.servers.resume(server=server.id) 202 if wait: 203 _wait(timeout, cloud, server, action) 204 module.exit_json(changed=True) 205 206 except shade.OpenStackCloudException as e: 207 module.fail_json(msg=str(e), extra_data=e.extra_data) 208 209 # this is magic, see lib/ansible/module_common.py 210 from ansible.module_utils.basic import * 211 from ansible.module_utils.openstack import * 212 if __name__ == '__main__': 213 main() 214 [end of cloud/openstack/os_server_actions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cloud/openstack/os_server_actions.py b/cloud/openstack/os_server_actions.py --- a/cloud/openstack/os_server_actions.py +++ b/cloud/openstack/os_server_actions.py @@ -35,6 +35,7 @@ description: - Perform server actions on an existing compute instance from OpenStack. This module does not return any data other than changed true/false. + When I(action) is 'rebuild', then I(image) parameter is required. options: server: description: @@ -55,8 +56,14 @@ description: - Perform the given action. The lock and unlock actions always return changed as the servers API does not provide lock status. - choices: [stop, start, pause, unpause, lock, unlock, suspend, resume] + choices: [stop, start, pause, unpause, lock, unlock, suspend, resume, + rebuild] default: present + image: + description: + - Image the server should be rebuilt with + default: null + version_added: "2.3" requirements: - "python >= 2.6" - "shade" @@ -82,7 +89,8 @@ 'lock': 'ACTIVE', # API doesn't show lock/unlock status 'unlock': 'ACTIVE', 'suspend': 'SUSPENDED', - 'resume': 'ACTIVE',} + 'resume': 'ACTIVE', + 'rebuild': 'ACTIVE'} _admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock'] @@ -113,11 +121,15 @@ argument_spec = openstack_full_argument_spec( server=dict(required=True), action=dict(required=True, choices=['stop', 'start', 'pause', 'unpause', - 'lock', 'unlock', 'suspend', 'resume']), + 'lock', 'unlock', 'suspend', 'resume', + 'rebuild']), + image=dict(required=False), ) module_kwargs = openstack_module_kwargs() - module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs) + module = AnsibleModule(argument_spec, supports_check_mode=True, + required_if=[('action', 'rebuild', ['image'])], + **module_kwargs) if not HAS_SHADE: module.fail_json(msg='shade is required for this module') @@ -125,6 +137,7 @@ action = module.params['action'] wait = module.params['wait'] timeout = module.params['timeout'] + image = module.params['image'] try: if action in _admin_actions: @@ -203,6 +216,18 @@ _wait(timeout, cloud, server, action) module.exit_json(changed=True) + elif action == 'rebuild': + image = cloud.get_image(image) + + if image is None: + module.fail_json(msg="Image does not exist") + + # rebuild doesn't set a state, just do it + cloud.nova_client.servers.rebuild(server=server.id, image=image.id) + if wait: + _wait(timeout, cloud, server, action) + module.exit_json(changed=True) + except shade.OpenStackCloudException as e: module.fail_json(msg=str(e), extra_data=e.extra_data)
{"golden_diff": "diff --git a/cloud/openstack/os_server_actions.py b/cloud/openstack/os_server_actions.py\n--- a/cloud/openstack/os_server_actions.py\n+++ b/cloud/openstack/os_server_actions.py\n@@ -35,6 +35,7 @@\n description:\n - Perform server actions on an existing compute instance from OpenStack.\n This module does not return any data other than changed true/false.\n+ When I(action) is 'rebuild', then I(image) parameter is required.\n options:\n server:\n description:\n@@ -55,8 +56,14 @@\n description:\n - Perform the given action. The lock and unlock actions always return\n changed as the servers API does not provide lock status.\n- choices: [stop, start, pause, unpause, lock, unlock, suspend, resume]\n+ choices: [stop, start, pause, unpause, lock, unlock, suspend, resume,\n+ rebuild]\n default: present\n+ image:\n+ description:\n+ - Image the server should be rebuilt with\n+ default: null\n+ version_added: \"2.3\"\n requirements:\n - \"python >= 2.6\"\n - \"shade\"\n@@ -82,7 +89,8 @@\n 'lock': 'ACTIVE', # API doesn't show lock/unlock status\n 'unlock': 'ACTIVE',\n 'suspend': 'SUSPENDED',\n- 'resume': 'ACTIVE',}\n+ 'resume': 'ACTIVE',\n+ 'rebuild': 'ACTIVE'}\n \n _admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock']\n \n@@ -113,11 +121,15 @@\n argument_spec = openstack_full_argument_spec(\n server=dict(required=True),\n action=dict(required=True, choices=['stop', 'start', 'pause', 'unpause',\n- 'lock', 'unlock', 'suspend', 'resume']),\n+ 'lock', 'unlock', 'suspend', 'resume',\n+ 'rebuild']),\n+ image=dict(required=False),\n )\n \n module_kwargs = openstack_module_kwargs()\n- module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs)\n+ module = AnsibleModule(argument_spec, supports_check_mode=True,\n+ required_if=[('action', 'rebuild', ['image'])],\n+ **module_kwargs)\n \n if not HAS_SHADE:\n module.fail_json(msg='shade is required for this module')\n@@ -125,6 +137,7 @@\n action = module.params['action']\n wait = module.params['wait']\n timeout = module.params['timeout']\n+ image = module.params['image']\n \n try:\n if action in _admin_actions:\n@@ -203,6 +216,18 @@\n _wait(timeout, cloud, server, action)\n module.exit_json(changed=True)\n \n+ elif action == 'rebuild':\n+ image = cloud.get_image(image)\n+\n+ if image is None:\n+ module.fail_json(msg=\"Image does not exist\")\n+\n+ # rebuild doesn't set a state, just do it\n+ cloud.nova_client.servers.rebuild(server=server.id, image=image.id)\n+ if wait:\n+ _wait(timeout, cloud, server, action)\n+ module.exit_json(changed=True)\n+\n except shade.OpenStackCloudException as e:\n module.fail_json(msg=str(e), extra_data=e.extra_data)\n", "issue": "os_server_actions nova rebuild server\n##### ISSUE TYPE\n\nFeature Idea\n##### COMPONENT NAME\n\nos_server module\n##### ANSIBLE VERSION\n\nN/A\n##### SUMMARY\n\nCould it be possible to implement the nova rebuild server action in the os_server_actions module?\n\nWe use openstack in our CI pipeline, and this is far more cheaper then booting a new server every\ntime we test.\n\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# Copyright (c) 2015, Jesse Keating <[email protected]>\n#\n# This module is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; 
without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this software. If not, see <http://www.gnu.org/licenses/>.\n\n\ntry:\n import shade\n from shade import meta\n HAS_SHADE = True\nexcept ImportError:\n HAS_SHADE = False\n\n\nDOCUMENTATION = '''\n---\nmodule: os_server_actions\nshort_description: Perform actions on Compute Instances from OpenStack\nextends_documentation_fragment: openstack\nversion_added: \"2.0\"\nauthor: \"Jesse Keating (@j2sol)\"\ndescription:\n - Perform server actions on an existing compute instance from OpenStack.\n This module does not return any data other than changed true/false.\noptions:\n server:\n description:\n - Name or ID of the instance\n required: true\n wait:\n description:\n - If the module should wait for the instance action to be performed.\n required: false\n default: 'yes'\n timeout:\n description:\n - The amount of time the module should wait for the instance to perform\n the requested action.\n required: false\n default: 180\n action:\n description:\n - Perform the given action. The lock and unlock actions always return\n changed as the servers API does not provide lock status.\n choices: [stop, start, pause, unpause, lock, unlock, suspend, resume]\n default: present\nrequirements:\n - \"python >= 2.6\"\n - \"shade\"\n'''\n\nEXAMPLES = '''\n# Pauses a compute instance\n- os_server_actions:\n action: pause\n auth:\n auth_url: https://mycloud.openstack.blueboxgrid.com:5001/v2.0\n username: admin\n password: admin\n project_name: admin\n server: vm1\n timeout: 200\n'''\n\n_action_map = {'stop': 'SHUTOFF',\n 'start': 'ACTIVE',\n 'pause': 'PAUSED',\n 'unpause': 'ACTIVE',\n 'lock': 'ACTIVE', # API doesn't show lock/unlock status\n 'unlock': 'ACTIVE',\n 'suspend': 'SUSPENDED',\n 'resume': 'ACTIVE',}\n\n_admin_actions = ['pause', 'unpause', 'suspend', 'resume', 'lock', 'unlock']\n\ndef _wait(timeout, cloud, server, action):\n \"\"\"Wait for the server to reach the desired state for the given action.\"\"\"\n\n for count in shade._utils._iterate_timeout(\n timeout,\n \"Timeout waiting for server to complete %s\" % action):\n try:\n server = cloud.get_server(server.id)\n except Exception:\n continue\n\n if server.status == _action_map[action]:\n return\n\n if server.status == 'ERROR':\n module.fail_json(msg=\"Server reached ERROR state while attempting to %s\" % action)\n\ndef _system_state_change(action, status):\n \"\"\"Check if system state would change.\"\"\"\n if status == _action_map[action]:\n return False\n return True\n\ndef main():\n argument_spec = openstack_full_argument_spec(\n server=dict(required=True),\n action=dict(required=True, choices=['stop', 'start', 'pause', 'unpause',\n 'lock', 'unlock', 'suspend', 'resume']),\n )\n\n module_kwargs = openstack_module_kwargs()\n module = AnsibleModule(argument_spec, supports_check_mode=True, **module_kwargs)\n\n if not HAS_SHADE:\n module.fail_json(msg='shade is required for this module')\n\n action = module.params['action']\n wait = module.params['wait']\n timeout = module.params['timeout']\n\n try:\n if action in _admin_actions:\n cloud = shade.operator_cloud(**module.params)\n else:\n cloud = shade.openstack_cloud(**module.params)\n server = cloud.get_server(module.params['server'])\n if not server:\n module.fail_json(msg='Could not find server %s' % server)\n status = server.status\n\n if module.check_mode:\n 
module.exit_json(changed=_system_state_change(action, status))\n\n if action == 'stop':\n if not _system_state_change(action, status):\n module.exit_json(changed=False)\n\n cloud.nova_client.servers.stop(server=server.id)\n if wait:\n _wait(timeout, cloud, server, action)\n module.exit_json(changed=True)\n\n if action == 'start':\n if not _system_state_change(action, status):\n module.exit_json(changed=False)\n\n cloud.nova_client.servers.start(server=server.id)\n if wait:\n _wait(timeout, cloud, server, action)\n module.exit_json(changed=True)\n\n if action == 'pause':\n if not _system_state_change(action, status):\n module.exit_json(changed=False)\n\n cloud.nova_client.servers.pause(server=server.id)\n if wait:\n _wait(timeout, cloud, server, action)\n module.exit_json(changed=True)\n\n elif action == 'unpause':\n if not _system_state_change(action, status):\n module.exit_json(changed=False)\n\n cloud.nova_client.servers.unpause(server=server.id)\n if wait:\n _wait(timeout, cloud, server, action)\n module.exit_json(changed=True)\n\n elif action == 'lock':\n # lock doesn't set a state, just do it\n cloud.nova_client.servers.lock(server=server.id)\n module.exit_json(changed=True)\n\n elif action == 'unlock':\n # unlock doesn't set a state, just do it\n cloud.nova_client.servers.unlock(server=server.id)\n module.exit_json(changed=True)\n\n elif action == 'suspend':\n if not _system_state_change(action, status):\n module.exit_json(changed=False)\n\n cloud.nova_client.servers.suspend(server=server.id)\n if wait:\n _wait(timeout, cloud, server, action)\n module.exit_json(changed=True)\n\n elif action == 'resume':\n if not _system_state_change(action, status):\n module.exit_json(changed=False)\n\n cloud.nova_client.servers.resume(server=server.id)\n if wait:\n _wait(timeout, cloud, server, action)\n module.exit_json(changed=True)\n\n except shade.OpenStackCloudException as e:\n module.fail_json(msg=str(e), extra_data=e.extra_data)\n\n# this is magic, see lib/ansible/module_common.py\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.openstack import *\nif __name__ == '__main__':\n main()\n", "path": "cloud/openstack/os_server_actions.py"}]}
2,704
752
gh_patches_debug_35031
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-3215
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Exported CSV file does not seem to contain information on what shelf my books are on **Describe the bug** I exported the CSV-file from my account on bookwyrm.social, and it does not seem to contain information on what books I've read and what I want to read and what I am currently reading (Shelves) **To Reproduce** Steps to reproduce the behavior: 1. Go to settings 2. click on CSV export 3. Click on "Download file" 4. No apparent status-column in the exported csv-file **Expected behavior** A column indicating what shelf a book is on **Instance** Exported from bookwyrm.social **Desktop (please complete the following information):** - OS: MacOS - Browser Firefox - Version 113b </issue> <code> [start of bookwyrm/views/preferences/export.py] 1 """ Let users export their book data """ 2 from datetime import timedelta 3 import csv 4 import io 5 6 from django.contrib.auth.decorators import login_required 7 from django.core.paginator import Paginator 8 from django.db.models import Q 9 from django.http import HttpResponse 10 from django.template.response import TemplateResponse 11 from django.utils import timezone 12 from django.views import View 13 from django.utils.decorators import method_decorator 14 from django.shortcuts import redirect 15 16 from bookwyrm import models 17 from bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob 18 from bookwyrm.settings import PAGE_LENGTH 19 20 # pylint: disable=no-self-use,too-many-locals 21 @method_decorator(login_required, name="dispatch") 22 class Export(View): 23 """Let users export data""" 24 25 def get(self, request): 26 """Request csv file""" 27 return TemplateResponse(request, "preferences/export.html") 28 29 def post(self, request): 30 """Download the csv file of a user's book data""" 31 books = models.Edition.viewer_aware_objects(request.user) 32 books_shelves = books.filter(Q(shelves__user=request.user)).distinct() 33 books_readthrough = books.filter(Q(readthrough__user=request.user)).distinct() 34 books_review = books.filter(Q(review__user=request.user)).distinct() 35 books_comment = books.filter(Q(comment__user=request.user)).distinct() 36 books_quotation = books.filter(Q(quotation__user=request.user)).distinct() 37 38 books = set( 39 list(books_shelves) 40 + list(books_readthrough) 41 + list(books_review) 42 + list(books_comment) 43 + list(books_quotation) 44 ) 45 46 csv_string = io.StringIO() 47 writer = csv.writer(csv_string) 48 49 deduplication_fields = [ 50 f.name 51 for f in models.Edition._meta.get_fields() # pylint: disable=protected-access 52 if getattr(f, "deduplication_field", False) 53 ] 54 fields = ( 55 ["title", "author_text"] 56 + deduplication_fields 57 + ["start_date", "finish_date", "stopped_date"] 58 + ["rating", "review_name", "review_cw", "review_content"] 59 ) 60 writer.writerow(fields) 61 62 for book in books: 63 # I think this is more efficient than doing a subquery in the view? 
but idk 64 review_rating = ( 65 models.Review.objects.filter( 66 user=request.user, book=book, rating__isnull=False 67 ) 68 .order_by("-published_date") 69 .first() 70 ) 71 72 book.rating = review_rating.rating if review_rating else None 73 74 readthrough = ( 75 models.ReadThrough.objects.filter(user=request.user, book=book) 76 .order_by("-start_date", "-finish_date") 77 .first() 78 ) 79 if readthrough: 80 book.start_date = ( 81 readthrough.start_date.date() if readthrough.start_date else None 82 ) 83 book.finish_date = ( 84 readthrough.finish_date.date() if readthrough.finish_date else None 85 ) 86 book.stopped_date = ( 87 readthrough.stopped_date.date() 88 if readthrough.stopped_date 89 else None 90 ) 91 92 review = ( 93 models.Review.objects.filter( 94 user=request.user, book=book, content__isnull=False 95 ) 96 .order_by("-published_date") 97 .first() 98 ) 99 if review: 100 book.review_name = review.name 101 book.review_cw = review.content_warning 102 book.review_content = review.raw_content 103 writer.writerow([getattr(book, field, "") or "" for field in fields]) 104 105 return HttpResponse( 106 csv_string.getvalue(), 107 content_type="text/csv", 108 headers={ 109 "Content-Disposition": 'attachment; filename="bookwyrm-export.csv"' 110 }, 111 ) 112 113 114 # pylint: disable=no-self-use 115 @method_decorator(login_required, name="dispatch") 116 class ExportUser(View): 117 """Let users export user data to import into another Bookwyrm instance""" 118 119 def get(self, request): 120 """Request tar file""" 121 122 jobs = BookwyrmExportJob.objects.filter(user=request.user).order_by( 123 "-created_date" 124 ) 125 site = models.SiteSettings.objects.get() 126 hours = site.user_import_time_limit 127 allowed = ( 128 jobs.first().created_date < timezone.now() - timedelta(hours=hours) 129 if jobs.first() 130 else True 131 ) 132 next_available = ( 133 jobs.first().created_date + timedelta(hours=hours) if not allowed else False 134 ) 135 paginated = Paginator(jobs, PAGE_LENGTH) 136 page = paginated.get_page(request.GET.get("page")) 137 data = { 138 "jobs": page, 139 "next_available": next_available, 140 "page_range": paginated.get_elided_page_range( 141 page.number, on_each_side=2, on_ends=1 142 ), 143 } 144 145 return TemplateResponse(request, "preferences/export-user.html", data) 146 147 def post(self, request): 148 """Download the json file of a user's data""" 149 150 job = BookwyrmExportJob.objects.create(user=request.user) 151 job.start_job() 152 153 return redirect("prefs-user-export") 154 155 156 @method_decorator(login_required, name="dispatch") 157 class ExportArchive(View): 158 """Serve the archive file""" 159 160 def get(self, request, archive_id): 161 """download user export file""" 162 export = BookwyrmExportJob.objects.get(task_id=archive_id, user=request.user) 163 return HttpResponse( 164 export.export_data, 165 content_type="application/gzip", 166 headers={ 167 "Content-Disposition": 'attachment; filename="bookwyrm-account-export.tar.gz"' # pylint: disable=line-too-long 168 }, 169 ) 170 [end of bookwyrm/views/preferences/export.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bookwyrm/views/preferences/export.py b/bookwyrm/views/preferences/export.py --- a/bookwyrm/views/preferences/export.py +++ b/bookwyrm/views/preferences/export.py @@ -17,6 +17,7 @@ from bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob from bookwyrm.settings import PAGE_LENGTH + # pylint: disable=no-self-use,too-many-locals @method_decorator(login_required, name="dispatch") class Export(View): @@ -54,8 +55,19 @@ fields = ( ["title", "author_text"] + deduplication_fields - + ["start_date", "finish_date", "stopped_date"] - + ["rating", "review_name", "review_cw", "review_content"] + + [ + "start_date", + "finish_date", + "stopped_date", + "rating", + "review_name", + "review_cw", + "review_content", + "review_published", + "shelf", + "shelf_name", + "shelf_date", + ] ) writer.writerow(fields) @@ -97,9 +109,27 @@ .first() ) if review: + book.review_published = ( + review.published_date.date() if review.published_date else None + ) book.review_name = review.name book.review_cw = review.content_warning - book.review_content = review.raw_content + book.review_content = ( + review.raw_content if review.raw_content else review.content + ) # GoodReads imported reviews do not have raw_content, but content. + + shelfbook = ( + models.ShelfBook.objects.filter(user=request.user, book=book) + .order_by("-shelved_date", "-created_date", "-updated_date") + .last() + ) + if shelfbook: + book.shelf = shelfbook.shelf.identifier + book.shelf_name = shelfbook.shelf.name + book.shelf_date = ( + shelfbook.shelved_date.date() if shelfbook.shelved_date else None + ) + writer.writerow([getattr(book, field, "") or "" for field in fields]) return HttpResponse(
{"golden_diff": "diff --git a/bookwyrm/views/preferences/export.py b/bookwyrm/views/preferences/export.py\n--- a/bookwyrm/views/preferences/export.py\n+++ b/bookwyrm/views/preferences/export.py\n@@ -17,6 +17,7 @@\n from bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob\n from bookwyrm.settings import PAGE_LENGTH\n \n+\n # pylint: disable=no-self-use,too-many-locals\n @method_decorator(login_required, name=\"dispatch\")\n class Export(View):\n@@ -54,8 +55,19 @@\n fields = (\n [\"title\", \"author_text\"]\n + deduplication_fields\n- + [\"start_date\", \"finish_date\", \"stopped_date\"]\n- + [\"rating\", \"review_name\", \"review_cw\", \"review_content\"]\n+ + [\n+ \"start_date\",\n+ \"finish_date\",\n+ \"stopped_date\",\n+ \"rating\",\n+ \"review_name\",\n+ \"review_cw\",\n+ \"review_content\",\n+ \"review_published\",\n+ \"shelf\",\n+ \"shelf_name\",\n+ \"shelf_date\",\n+ ]\n )\n writer.writerow(fields)\n \n@@ -97,9 +109,27 @@\n .first()\n )\n if review:\n+ book.review_published = (\n+ review.published_date.date() if review.published_date else None\n+ )\n book.review_name = review.name\n book.review_cw = review.content_warning\n- book.review_content = review.raw_content\n+ book.review_content = (\n+ review.raw_content if review.raw_content else review.content\n+ ) # GoodReads imported reviews do not have raw_content, but content.\n+\n+ shelfbook = (\n+ models.ShelfBook.objects.filter(user=request.user, book=book)\n+ .order_by(\"-shelved_date\", \"-created_date\", \"-updated_date\")\n+ .last()\n+ )\n+ if shelfbook:\n+ book.shelf = shelfbook.shelf.identifier\n+ book.shelf_name = shelfbook.shelf.name\n+ book.shelf_date = (\n+ shelfbook.shelved_date.date() if shelfbook.shelved_date else None\n+ )\n+\n writer.writerow([getattr(book, field, \"\") or \"\" for field in fields])\n \n return HttpResponse(\n", "issue": "Exported CSV file does not seem to contain information on what shelf my books are on\n**Describe the bug**\r\nI exported the CSV-file from my account on bookwyrm.social, and it does not seem to contain information on what books I've read and what I want to read and what I am currently reading (Shelves)\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to settings\r\n2. click on CSV export\r\n3. Click on \"Download file\"\r\n4. 
No apparent status-column in the exported csv-file\r\n\r\n**Expected behavior**\r\nA column indicating what shelf a book is on\r\n\r\n**Instance**\r\nExported from bookwyrm.social\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS\r\n - Browser Firefox\r\n - Version 113b\n", "before_files": [{"content": "\"\"\" Let users export their book data \"\"\"\nfrom datetime import timedelta\nimport csv\nimport io\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.views import View\nfrom django.utils.decorators import method_decorator\nfrom django.shortcuts import redirect\n\nfrom bookwyrm import models\nfrom bookwyrm.models.bookwyrm_export_job import BookwyrmExportJob\nfrom bookwyrm.settings import PAGE_LENGTH\n\n# pylint: disable=no-self-use,too-many-locals\n@method_decorator(login_required, name=\"dispatch\")\nclass Export(View):\n \"\"\"Let users export data\"\"\"\n\n def get(self, request):\n \"\"\"Request csv file\"\"\"\n return TemplateResponse(request, \"preferences/export.html\")\n\n def post(self, request):\n \"\"\"Download the csv file of a user's book data\"\"\"\n books = models.Edition.viewer_aware_objects(request.user)\n books_shelves = books.filter(Q(shelves__user=request.user)).distinct()\n books_readthrough = books.filter(Q(readthrough__user=request.user)).distinct()\n books_review = books.filter(Q(review__user=request.user)).distinct()\n books_comment = books.filter(Q(comment__user=request.user)).distinct()\n books_quotation = books.filter(Q(quotation__user=request.user)).distinct()\n\n books = set(\n list(books_shelves)\n + list(books_readthrough)\n + list(books_review)\n + list(books_comment)\n + list(books_quotation)\n )\n\n csv_string = io.StringIO()\n writer = csv.writer(csv_string)\n\n deduplication_fields = [\n f.name\n for f in models.Edition._meta.get_fields() # pylint: disable=protected-access\n if getattr(f, \"deduplication_field\", False)\n ]\n fields = (\n [\"title\", \"author_text\"]\n + deduplication_fields\n + [\"start_date\", \"finish_date\", \"stopped_date\"]\n + [\"rating\", \"review_name\", \"review_cw\", \"review_content\"]\n )\n writer.writerow(fields)\n\n for book in books:\n # I think this is more efficient than doing a subquery in the view? 
but idk\n review_rating = (\n models.Review.objects.filter(\n user=request.user, book=book, rating__isnull=False\n )\n .order_by(\"-published_date\")\n .first()\n )\n\n book.rating = review_rating.rating if review_rating else None\n\n readthrough = (\n models.ReadThrough.objects.filter(user=request.user, book=book)\n .order_by(\"-start_date\", \"-finish_date\")\n .first()\n )\n if readthrough:\n book.start_date = (\n readthrough.start_date.date() if readthrough.start_date else None\n )\n book.finish_date = (\n readthrough.finish_date.date() if readthrough.finish_date else None\n )\n book.stopped_date = (\n readthrough.stopped_date.date()\n if readthrough.stopped_date\n else None\n )\n\n review = (\n models.Review.objects.filter(\n user=request.user, book=book, content__isnull=False\n )\n .order_by(\"-published_date\")\n .first()\n )\n if review:\n book.review_name = review.name\n book.review_cw = review.content_warning\n book.review_content = review.raw_content\n writer.writerow([getattr(book, field, \"\") or \"\" for field in fields])\n\n return HttpResponse(\n csv_string.getvalue(),\n content_type=\"text/csv\",\n headers={\n \"Content-Disposition\": 'attachment; filename=\"bookwyrm-export.csv\"'\n },\n )\n\n\n# pylint: disable=no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass ExportUser(View):\n \"\"\"Let users export user data to import into another Bookwyrm instance\"\"\"\n\n def get(self, request):\n \"\"\"Request tar file\"\"\"\n\n jobs = BookwyrmExportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n )\n site = models.SiteSettings.objects.get()\n hours = site.user_import_time_limit\n allowed = (\n jobs.first().created_date < timezone.now() - timedelta(hours=hours)\n if jobs.first()\n else True\n )\n next_available = (\n jobs.first().created_date + timedelta(hours=hours) if not allowed else False\n )\n paginated = Paginator(jobs, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"jobs\": page,\n \"next_available\": next_available,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n\n return TemplateResponse(request, \"preferences/export-user.html\", data)\n\n def post(self, request):\n \"\"\"Download the json file of a user's data\"\"\"\n\n job = BookwyrmExportJob.objects.create(user=request.user)\n job.start_job()\n\n return redirect(\"prefs-user-export\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ExportArchive(View):\n \"\"\"Serve the archive file\"\"\"\n\n def get(self, request, archive_id):\n \"\"\"download user export file\"\"\"\n export = BookwyrmExportJob.objects.get(task_id=archive_id, user=request.user)\n return HttpResponse(\n export.export_data,\n content_type=\"application/gzip\",\n headers={\n \"Content-Disposition\": 'attachment; filename=\"bookwyrm-account-export.tar.gz\"' # pylint: disable=line-too-long\n },\n )\n", "path": "bookwyrm/views/preferences/export.py"}]}
2,307
502
gh_patches_debug_583
rasdani/github-patches
git_diff
pex-tool__pex-1750
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.85 On the docket: + [x] PEX interpreters should support all underlying Python interpreter options. #1745 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.84" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.84" +__version__ = "2.1.85"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.84\"\n+__version__ = \"2.1.85\"\n", "issue": "Release 2.1.85\nOn the docket:\r\n+ [x] PEX interpreters should support all underlying Python interpreter options. #1745\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.84\"\n", "path": "pex/version.py"}]}
618
96
gh_patches_debug_2856
rasdani/github-patches
git_diff
ResonantGeoData__ResonantGeoData-223
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> _convert_to_cog function doesn't generate COG The current _convert_to_cog function (https://github.com/ResonantGeoData/ResonantGeoData/blob/master/rgd/geodata/models/imagery/subsample.py#L32-L51) doesn't output a COG; it just outputs a tiled tiff file. GDAL has two separate format writers; one for regular TIFF and one for COG. Without the '-of COG' option, the regular tiff writer is invoked. The options available to the two writers differ as well: for COG you can specify a predictor with a string value, for regular tiff you must specify a numeric predictor value. Using lzw compression, I'd recommend the following options: `['-co', 'COMPRESS=LZW', '-co', 'PREDICTOR=YES', '-of', 'COG']`. The default block (tile) size in the COG writer is 512 pixels; some tile servers or consumers seem to prefer 256; if so, add `-co BLOCKSIZE=256`. You can use tiffdump to see that COG files have multiple directories at different resolutions while non-COG files have a single directory. </issue> <code> [start of rgd/geodata/models/imagery/subsample.py] 1 """Tasks for subsampling images with GDAL.""" 2 import os 3 import tempfile 4 5 from celery.utils.log import get_task_logger 6 from django.conf import settings 7 from girder_utils.files import field_file_to_local_path 8 from osgeo import gdal 9 10 from ..common import ArbitraryFile 11 from .base import ConvertedImageFile 12 13 logger = get_task_logger(__name__) 14 15 16 def _gdal_translate(source_field, output_field, **kwargs): 17 workdir = getattr(settings, 'GEODATA_WORKDIR', None) 18 tmpdir = tempfile.mkdtemp(dir=workdir) 19 20 with field_file_to_local_path(source_field) as file_path: 21 logger.info(f'The image file path: {file_path}') 22 output_path = os.path.join(tmpdir, 'subsampled_' + os.path.basename(file_path)) 23 ds = gdal.Open(str(file_path)) 24 ds = gdal.Translate(output_path, ds, **kwargs) 25 ds = None 26 27 output_field.save(os.path.basename(output_path), open(output_path, 'rb')) 28 29 return 30 31 32 def convert_to_cog(cog_id): 33 """Populate ConvertedImageFile with COG file.""" 34 options = [ 35 '-co', 36 'COMPRESS=LZW', 37 '-co', 38 'TILED=YES', 39 ] 40 cog = ConvertedImageFile.objects.get(id=cog_id) 41 cog.converted_file = ArbitraryFile() 42 src = cog.source_image.image_file.imagefile.file 43 output = cog.converted_file.file 44 _gdal_translate(src, output, options=options) 45 cog.converted_file.save() 46 cog.save( 47 update_fields=[ 48 'converted_file', 49 ] 50 ) 51 return 52 [end of rgd/geodata/models/imagery/subsample.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rgd/geodata/models/imagery/subsample.py b/rgd/geodata/models/imagery/subsample.py --- a/rgd/geodata/models/imagery/subsample.py +++ b/rgd/geodata/models/imagery/subsample.py @@ -35,7 +35,11 @@ '-co', 'COMPRESS=LZW', '-co', - 'TILED=YES', + 'PREDICTOR=YES', + '-of', + 'COG', + '-co', + 'BLOCKSIZE=256', ] cog = ConvertedImageFile.objects.get(id=cog_id) cog.converted_file = ArbitraryFile()
{"golden_diff": "diff --git a/rgd/geodata/models/imagery/subsample.py b/rgd/geodata/models/imagery/subsample.py\n--- a/rgd/geodata/models/imagery/subsample.py\n+++ b/rgd/geodata/models/imagery/subsample.py\n@@ -35,7 +35,11 @@\n '-co',\n 'COMPRESS=LZW',\n '-co',\n- 'TILED=YES',\n+ 'PREDICTOR=YES',\n+ '-of',\n+ 'COG',\n+ '-co',\n+ 'BLOCKSIZE=256',\n ]\n cog = ConvertedImageFile.objects.get(id=cog_id)\n cog.converted_file = ArbitraryFile()\n", "issue": "_convert_to_cog function doesn't generate COG\nThe current _convert_to_cog function (https://github.com/ResonantGeoData/ResonantGeoData/blob/master/rgd/geodata/models/imagery/subsample.py#L32-L51) doesn't output a COG; it just outputs a tiled tiff file. GDAL has two separate format writers; one for regular TIFF and one for COG. Without the '-of COG' option, the regular tiff writer is invoked. The options available to the two writers differ as well: for COG you can specify a predictor with a string value, for regular tiff you must specify a numeric predictor value.\r\n\r\nUsing lzw compression, I'd recommend the following options: `['-co', 'COMPRESS=LZW', '-co', 'PREDICTOR=YES', '-of', 'COG']`. The default block (tile) size in the COG writer is 512 pixels; some tile servers or consumers seem to prefer 256; if so, add `-co BLOCKSIZE=256`.\r\n\r\nYou can use tiffdump to see that COG files have multiple directories at different resolutions while non-COG files have a single directory.\r\n\n", "before_files": [{"content": "\"\"\"Tasks for subsampling images with GDAL.\"\"\"\nimport os\nimport tempfile\n\nfrom celery.utils.log import get_task_logger\nfrom django.conf import settings\nfrom girder_utils.files import field_file_to_local_path\nfrom osgeo import gdal\n\nfrom ..common import ArbitraryFile\nfrom .base import ConvertedImageFile\n\nlogger = get_task_logger(__name__)\n\n\ndef _gdal_translate(source_field, output_field, **kwargs):\n workdir = getattr(settings, 'GEODATA_WORKDIR', None)\n tmpdir = tempfile.mkdtemp(dir=workdir)\n\n with field_file_to_local_path(source_field) as file_path:\n logger.info(f'The image file path: {file_path}')\n output_path = os.path.join(tmpdir, 'subsampled_' + os.path.basename(file_path))\n ds = gdal.Open(str(file_path))\n ds = gdal.Translate(output_path, ds, **kwargs)\n ds = None\n\n output_field.save(os.path.basename(output_path), open(output_path, 'rb'))\n\n return\n\n\ndef convert_to_cog(cog_id):\n \"\"\"Populate ConvertedImageFile with COG file.\"\"\"\n options = [\n '-co',\n 'COMPRESS=LZW',\n '-co',\n 'TILED=YES',\n ]\n cog = ConvertedImageFile.objects.get(id=cog_id)\n cog.converted_file = ArbitraryFile()\n src = cog.source_image.image_file.imagefile.file\n output = cog.converted_file.file\n _gdal_translate(src, output, options=options)\n cog.converted_file.save()\n cog.save(\n update_fields=[\n 'converted_file',\n ]\n )\n return\n", "path": "rgd/geodata/models/imagery/subsample.py"}]}
1,263
154
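A minimal, self-contained sketch of the corrected conversion above, outside the project's Django/Celery plumbing. The function name and paths are placeholders, and the `-of COG` output format assumes a GDAL build that ships the COG driver (GDAL 3.1 or newer):

```python
from osgeo import gdal


def convert_raster_to_cog(src_path, dst_path):
    """Rewrite any GDAL-readable raster as a Cloud Optimized GeoTIFF."""
    options = [
        '-of', 'COG',            # invoke the COG writer, not the plain GTiff writer
        '-co', 'COMPRESS=LZW',
        '-co', 'PREDICTOR=YES',  # the COG driver takes a string predictor value
        '-co', 'BLOCKSIZE=256',  # COG default is 512; some tile consumers prefer 256
    ]
    src = gdal.Open(src_path)
    gdal.Translate(dst_path, src, options=options)
    src = None  # release the dataset handle
```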
gh_patches_debug_25458
rasdani/github-patches
git_diff
docker__docker-py-1115
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support create volume Labels options First, check the remote API: https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/ ``` { "Name": "tardis", "Labels": { "com.example.some-label": "some-value", "com.example.some-other-label": "some-other-value" }, } ``` It provides `Labels` options. But docker-py doesn't support it right not. Second, in docker-py's document give the following example: ``` volume = cli.create_volume( name='foobar', driver='local', driver_opts={'foo': 'bar', 'baz': 'false'} docker.errors.APIError: 500 Server Error: Internal Server Error ("create foobar: invalid option key: "foo"") ``` But driver_opts is not working. What kinds of driver_opts can we use? </issue> <code> [start of docker/api/volume.py] 1 from .. import utils 2 3 4 class VolumeApiMixin(object): 5 @utils.minimum_version('1.21') 6 def volumes(self, filters=None): 7 params = { 8 'filters': utils.convert_filters(filters) if filters else None 9 } 10 url = self._url('/volumes') 11 return self._result(self._get(url, params=params), True) 12 13 @utils.minimum_version('1.21') 14 def create_volume(self, name, driver=None, driver_opts=None): 15 url = self._url('/volumes/create') 16 if driver_opts is not None and not isinstance(driver_opts, dict): 17 raise TypeError('driver_opts must be a dictionary') 18 19 data = { 20 'Name': name, 21 'Driver': driver, 22 'DriverOpts': driver_opts, 23 } 24 return self._result(self._post_json(url, data=data), True) 25 26 @utils.minimum_version('1.21') 27 def inspect_volume(self, name): 28 url = self._url('/volumes/{0}', name) 29 return self._result(self._get(url), True) 30 31 @utils.minimum_version('1.21') 32 def remove_volume(self, name): 33 url = self._url('/volumes/{0}', name) 34 resp = self._delete(url) 35 self._raise_for_status(resp) 36 [end of docker/api/volume.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docker/api/volume.py b/docker/api/volume.py --- a/docker/api/volume.py +++ b/docker/api/volume.py @@ -1,3 +1,4 @@ +from .. import errors from .. import utils @@ -11,7 +12,7 @@ return self._result(self._get(url, params=params), True) @utils.minimum_version('1.21') - def create_volume(self, name, driver=None, driver_opts=None): + def create_volume(self, name, driver=None, driver_opts=None, labels=None): url = self._url('/volumes/create') if driver_opts is not None and not isinstance(driver_opts, dict): raise TypeError('driver_opts must be a dictionary') @@ -21,6 +22,16 @@ 'Driver': driver, 'DriverOpts': driver_opts, } + + if labels is not None: + if utils.compare_version('1.23', self._version) < 0: + raise errors.InvalidVersion( + 'volume labels were introduced in API 1.23' + ) + if not isinstance(labels, dict): + raise TypeError('labels must be a dictionary') + data["Labels"] = labels + return self._result(self._post_json(url, data=data), True) @utils.minimum_version('1.21')
{"golden_diff": "diff --git a/docker/api/volume.py b/docker/api/volume.py\n--- a/docker/api/volume.py\n+++ b/docker/api/volume.py\n@@ -1,3 +1,4 @@\n+from .. import errors\n from .. import utils\n \n \n@@ -11,7 +12,7 @@\n return self._result(self._get(url, params=params), True)\n \n @utils.minimum_version('1.21')\n- def create_volume(self, name, driver=None, driver_opts=None):\n+ def create_volume(self, name, driver=None, driver_opts=None, labels=None):\n url = self._url('/volumes/create')\n if driver_opts is not None and not isinstance(driver_opts, dict):\n raise TypeError('driver_opts must be a dictionary')\n@@ -21,6 +22,16 @@\n 'Driver': driver,\n 'DriverOpts': driver_opts,\n }\n+\n+ if labels is not None:\n+ if utils.compare_version('1.23', self._version) < 0:\n+ raise errors.InvalidVersion(\n+ 'volume labels were introduced in API 1.23'\n+ )\n+ if not isinstance(labels, dict):\n+ raise TypeError('labels must be a dictionary')\n+ data[\"Labels\"] = labels\n+\n return self._result(self._post_json(url, data=data), True)\n \n @utils.minimum_version('1.21')\n", "issue": "Support create volume Labels options\nFirst, check the remote API:\nhttps://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/\n\n```\n {\n \"Name\": \"tardis\",\n \"Labels\": {\n \"com.example.some-label\": \"some-value\",\n \"com.example.some-other-label\": \"some-other-value\"\n },\n}\n```\n\nIt provides `Labels` options. But docker-py doesn't support it right not.\n\nSecond, in docker-py's document give the following example:\n\n```\nvolume = cli.create_volume(\n name='foobar', driver='local', driver_opts={'foo': 'bar', 'baz': 'false'}\n\ndocker.errors.APIError: 500 Server Error: Internal Server Error (\"create foobar: invalid option key: \"foo\"\")\n```\n\nBut driver_opts is not working. What kinds of driver_opts can we use?\n\n", "before_files": [{"content": "from .. import utils\n\n\nclass VolumeApiMixin(object):\n @utils.minimum_version('1.21')\n def volumes(self, filters=None):\n params = {\n 'filters': utils.convert_filters(filters) if filters else None\n }\n url = self._url('/volumes')\n return self._result(self._get(url, params=params), True)\n\n @utils.minimum_version('1.21')\n def create_volume(self, name, driver=None, driver_opts=None):\n url = self._url('/volumes/create')\n if driver_opts is not None and not isinstance(driver_opts, dict):\n raise TypeError('driver_opts must be a dictionary')\n\n data = {\n 'Name': name,\n 'Driver': driver,\n 'DriverOpts': driver_opts,\n }\n return self._result(self._post_json(url, data=data), True)\n\n @utils.minimum_version('1.21')\n def inspect_volume(self, name):\n url = self._url('/volumes/{0}', name)\n return self._result(self._get(url), True)\n\n @utils.minimum_version('1.21')\n def remove_volume(self, name):\n url = self._url('/volumes/{0}', name)\n resp = self._delete(url)\n self._raise_for_status(resp)\n", "path": "docker/api/volume.py"}]}
1,062
306
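For the volume-labels fix above, a usage sketch against the patched client. It assumes the 1.x-era `docker.Client` (renamed `APIClient` in docker-py 2.0+) and a daemon speaking API 1.23 or newer; the volume name and label keys reuse the ones from the issue:

```python
import docker

cli = docker.Client(base_url='unix://var/run/docker.sock', version='1.23')

volume = cli.create_volume(
    name='tardis',
    driver='local',
    labels={
        'com.example.some-label': 'some-value',
        'com.example.some-other-label': 'some-other-value',
    },
)
# The API echoes the volume back as a dict, including its labels on API >= 1.23.
print(volume['Name'], volume.get('Labels'))
```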
gh_patches_debug_29720
rasdani/github-patches
git_diff
wagtail__wagtail-688
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Need to document focal_point_key changes for people who use custom image models Everyone who uses custom image models need to create a new migration to make the `focal_point_key` field on rendition not nullable. Also, South doesn't automatically add the `default=''` into the migration (even though it's set on the field) which would cause the migration to fail. The migration may also fail if you have duplicate renditions (this applies to everyone, not just custom image model people). The following SQL query should delete them: ``` DELETE FROM wagtailimages_rendition WHERE image_id || '-' || filter_id IN (SELECT image_id || '-' || filter_id FROM (SELECT image_id, filter_id, COUNT(*) AS count FROM wagtailimages_rendition GROUP BY image_id, filter_id) AS renditions WHERE count > 1); ``` </issue> <code> [start of wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import unicode_literals 3 4 from south.utils import datetime_utils as datetime 5 from south.db import db 6 from south.v2 import SchemaMigration 7 from django.db import models 8 9 10 class Migration(SchemaMigration): 11 12 def forwards(self, orm): 13 14 # Changing field 'Rendition.focal_point_key' 15 db.alter_column('wagtailimages_rendition', 'focal_point_key', self.gf('django.db.models.fields.CharField')(max_length=255, default='')) 16 17 def backwards(self, orm): 18 19 # Changing field 'Rendition.focal_point_key' 20 db.alter_column('wagtailimages_rendition', 'focal_point_key', self.gf('django.db.models.fields.CharField')(max_length=255, null=True)) 21 22 models = { 23 'auth.group': { 24 'Meta': {'object_name': 'Group'}, 25 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 26 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 27 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) 28 }, 29 'auth.permission': { 30 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 31 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 32 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 33 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 34 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) 35 }, 36 'auth.user': { 37 'Meta': {'object_name': 'User'}, 38 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 39 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 40 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 41 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Group']"}), 42 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 43 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 44 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 45 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 46 'last_login': 
('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 47 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 48 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 49 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Permission']"}), 50 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) 51 }, 52 'contenttypes.contenttype': { 53 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 54 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 55 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 56 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 57 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) 58 }, 59 'wagtailimages.filter': { 60 'Meta': {'object_name': 'Filter'}, 61 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 62 'spec': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}) 63 }, 64 'wagtailimages.image': { 65 'Meta': {'object_name': 'Image'}, 66 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 67 'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 68 'focal_point_height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 69 'focal_point_width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 70 'focal_point_x': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 71 'focal_point_y': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 72 'height': ('django.db.models.fields.IntegerField', [], {}), 73 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 74 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 75 'uploaded_by_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}), 76 'width': ('django.db.models.fields.IntegerField', [], {}) 77 }, 78 'wagtailimages.rendition': { 79 'Meta': {'unique_together': "(('image', 'filter', 'focal_point_key'),)", 'object_name': 'Rendition'}, 80 'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 81 'filter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['wagtailimages.Filter']"}), 82 'focal_point_key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}), 83 'height': ('django.db.models.fields.IntegerField', [], {}), 84 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 85 'image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'renditions'", 'to': "orm['wagtailimages.Image']"}), 86 'width': ('django.db.models.fields.IntegerField', [], {}) 87 } 88 } 89 90 complete_apps = ['wagtailimages'] [end of wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py] [start of wagtail/wagtailimages/migrations/0004_make_focal_point_key_not_nullable.py] 1 # -*- coding: utf-8 -*- 2 from __future__ import unicode_literals 3 4 from django.db 
import models, migrations 5 6 7 class Migration(migrations.Migration): 8 9 dependencies = [ 10 ('wagtailimages', '0003_fix_focal_point_fields'), 11 ] 12 13 operations = [ 14 migrations.AlterField( 15 model_name='rendition', 16 name='focal_point_key', 17 field=models.CharField(blank=True, default='', max_length=255, editable=False), 18 ), 19 ] 20 [end of wagtail/wagtailimages/migrations/0004_make_focal_point_key_not_nullable.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wagtail/wagtailimages/migrations/0004_make_focal_point_key_not_nullable.py b/wagtail/wagtailimages/migrations/0004_make_focal_point_key_not_nullable.py --- a/wagtail/wagtailimages/migrations/0004_make_focal_point_key_not_nullable.py +++ b/wagtail/wagtailimages/migrations/0004_make_focal_point_key_not_nullable.py @@ -4,6 +4,13 @@ from django.db import models, migrations +def remove_duplicate_renditions(apps, schema_editor): + schema_editor.execute(""" + DELETE FROM wagtailimages_rendition WHERE image_id || '-' || filter_id IN ( + SELECT image_id || '-' || filter_id FROM wagtailimages_rendition WHERE focal_point_key IS NULL GROUP BY image_id, filter_id HAVING COUNT(*) > 1 + ) AND focal_point_key IS NULL + """) + class Migration(migrations.Migration): dependencies = [ @@ -11,6 +18,7 @@ ] operations = [ + migrations.RunPython(remove_duplicate_renditions), migrations.AlterField( model_name='rendition', name='focal_point_key', diff --git a/wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py b/wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py --- a/wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py +++ b/wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py @@ -10,6 +10,12 @@ class Migration(SchemaMigration): def forwards(self, orm): + # remove duplicate renditions + db.execute(""" + DELETE FROM wagtailimages_rendition WHERE image_id || '-' || filter_id IN ( + SELECT image_id || '-' || filter_id FROM wagtailimages_rendition WHERE focal_point_key IS NULL GROUP BY image_id, filter_id HAVING COUNT(*) > 1 + ) AND focal_point_key IS NULL + """) # Changing field 'Rendition.focal_point_key' db.alter_column('wagtailimages_rendition', 'focal_point_key', self.gf('django.db.models.fields.CharField')(max_length=255, default=''))
{"golden_diff": "diff --git a/wagtail/wagtailimages/migrations/0004_make_focal_point_key_not_nullable.py b/wagtail/wagtailimages/migrations/0004_make_focal_point_key_not_nullable.py\n--- a/wagtail/wagtailimages/migrations/0004_make_focal_point_key_not_nullable.py\n+++ b/wagtail/wagtailimages/migrations/0004_make_focal_point_key_not_nullable.py\n@@ -4,6 +4,13 @@\n from django.db import models, migrations\n \n \n+def remove_duplicate_renditions(apps, schema_editor):\n+ schema_editor.execute(\"\"\"\n+ DELETE FROM wagtailimages_rendition WHERE image_id || '-' || filter_id IN (\n+ SELECT image_id || '-' || filter_id FROM wagtailimages_rendition WHERE focal_point_key IS NULL GROUP BY image_id, filter_id HAVING COUNT(*) > 1\n+ ) AND focal_point_key IS NULL\n+ \"\"\")\n+\n class Migration(migrations.Migration):\n \n dependencies = [\n@@ -11,6 +18,7 @@\n ]\n \n operations = [\n+ migrations.RunPython(remove_duplicate_renditions),\n migrations.AlterField(\n model_name='rendition',\n name='focal_point_key',\ndiff --git a/wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py b/wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py\n--- a/wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py\n+++ b/wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py\n@@ -10,6 +10,12 @@\n class Migration(SchemaMigration):\n \n def forwards(self, orm):\n+ # remove duplicate renditions\n+ db.execute(\"\"\"\n+ DELETE FROM wagtailimages_rendition WHERE image_id || '-' || filter_id IN (\n+ SELECT image_id || '-' || filter_id FROM wagtailimages_rendition WHERE focal_point_key IS NULL GROUP BY image_id, filter_id HAVING COUNT(*) > 1\n+ ) AND focal_point_key IS NULL\n+ \"\"\")\n \n # Changing field 'Rendition.focal_point_key'\n db.alter_column('wagtailimages_rendition', 'focal_point_key', self.gf('django.db.models.fields.CharField')(max_length=255, default=''))\n", "issue": "Need to document focal_point_key changes for people who use custom image models\nEveryone who uses custom image models need to create a new migration to make the `focal_point_key` field on rendition not nullable. Also, South doesn't automatically add the `default=''` into the migration (even though it's set on the field) which would cause the migration to fail.\n\nThe migration may also fail if you have duplicate renditions (this applies to everyone, not just custom image model people). 
The following SQL query should delete them:\n\n```\nDELETE FROM wagtailimages_rendition WHERE image_id || '-' || filter_id IN (SELECT image_id || '-' || filter_id FROM (SELECT image_id, filter_id, COUNT(*) AS count FROM wagtailimages_rendition GROUP BY image_id, filter_id) AS renditions WHERE count > 1);\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n\n # Changing field 'Rendition.focal_point_key'\n db.alter_column('wagtailimages_rendition', 'focal_point_key', self.gf('django.db.models.fields.CharField')(max_length=255, default=''))\n\n def backwards(self, orm):\n\n # Changing field 'Rendition.focal_point_key'\n db.alter_column('wagtailimages_rendition', 'focal_point_key', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))\n\n models = {\n 'auth.group': {\n 'Meta': {'object_name': 'Group'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),\n 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': \"orm['auth.Permission']\", 'symmetrical': 'False', 'blank': 'True'})\n },\n 'auth.permission': {\n 'Meta': {'ordering': \"('content_type__app_label', 'content_type__model', 'codename')\", 'unique_together': \"(('content_type', 'codename'),)\", 'object_name': 'Permission'},\n 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['contenttypes.ContentType']\"}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})\n },\n 'auth.user': {\n 'Meta': {'object_name': 'User'},\n 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),\n 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'user_set'\", 'blank': 'True', 'to': \"orm['auth.Group']\"}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),\n 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),\n 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),\n 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),\n 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': \"'user_set'\", 'blank': 'True', 'to': \"orm['auth.Permission']\"}),\n 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})\n },\n 'contenttypes.contenttype': {\n 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': 
\"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n },\n 'wagtailimages.filter': {\n 'Meta': {'object_name': 'Filter'},\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'spec': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})\n },\n 'wagtailimages.image': {\n 'Meta': {'object_name': 'Image'},\n 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),\n 'focal_point_height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'focal_point_width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'focal_point_x': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'focal_point_y': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),\n 'height': ('django.db.models.fields.IntegerField', [], {}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),\n 'uploaded_by_user': ('django.db.models.fields.related.ForeignKey', [], {'to': \"orm['auth.User']\", 'null': 'True', 'blank': 'True'}),\n 'width': ('django.db.models.fields.IntegerField', [], {})\n },\n 'wagtailimages.rendition': {\n 'Meta': {'unique_together': \"(('image', 'filter', 'focal_point_key'),)\", 'object_name': 'Rendition'},\n 'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),\n 'filter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'+'\", 'to': \"orm['wagtailimages.Filter']\"}),\n 'focal_point_key': ('django.db.models.fields.CharField', [], {'default': \"''\", 'max_length': '255', 'blank': 'True'}),\n 'height': ('django.db.models.fields.IntegerField', [], {}),\n 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"'renditions'\", 'to': \"orm['wagtailimages.Image']\"}),\n 'width': ('django.db.models.fields.IntegerField', [], {})\n }\n }\n\n complete_apps = ['wagtailimages']", "path": "wagtail/wagtailimages/south_migrations/0004_auto__chg_field_rendition_focal_point_key.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('wagtailimages', '0003_fix_focal_point_fields'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='rendition',\n name='focal_point_key',\n field=models.CharField(blank=True, default='', max_length=255, editable=False),\n ),\n ]\n", "path": "wagtail/wagtailimages/migrations/0004_make_focal_point_key_not_nullable.py"}]}
2,603
563
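Projects with a custom image model would need the same two steps (dedupe, then tighten the field) in a migration of their own. A sketch only: the app label, model name, and table name (`myapp`, `customrendition`, `myapp_customrendition`) are placeholders, not Wagtail names:

```python
from django.db import migrations, models


def remove_duplicate_renditions(apps, schema_editor):
    # Same dedupe query as the issue, pointed at the custom rendition table.
    schema_editor.execute("""
        DELETE FROM myapp_customrendition WHERE image_id || '-' || filter_id IN (
            SELECT image_id || '-' || filter_id FROM myapp_customrendition
            WHERE focal_point_key IS NULL
            GROUP BY image_id, filter_id HAVING COUNT(*) > 1
        ) AND focal_point_key IS NULL
    """)


class Migration(migrations.Migration):

    dependencies = [
        ('myapp', '0001_initial'),  # placeholder dependency
    ]

    operations = [
        migrations.RunPython(remove_duplicate_renditions),
        migrations.AlterField(
            model_name='customrendition',
            name='focal_point_key',
            field=models.CharField(blank=True, default='', max_length=255, editable=False),
        ),
    ]
```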
gh_patches_debug_8491
rasdani/github-patches
git_diff
OpenEnergyPlatform__oeplatform-1324
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Inconvenience of client cache update upon releasing/deploy new versions ## Description of the issue When deploying a new version of an application, users are inconvenienced as they need to manually refresh the client cache to see the latest changes. ## Ideas of solution ChatGPT suggested to introduce a Cache Busting Mechanism. We could use this existing lib [django-compressor](https://github.com/django-compressor/django-compressor). > To set up a cache-busting mechanism in your Django application, you can use the Django extension module "django-compressor". Django Compressor allows you to compress static resources such as CSS and JavaScript files and assign them a unique hash value based on their content. This automatically changes the URL of the resources whenever they are modified. ## Workflow checklist - [x] I am aware of the workflow in [CONTRIBUTING.md](https://github.com/OpenEnergyPlatform/oeplatform/blob/develop/CONTRIBUTING.md) </issue> <code> [start of oeplatform/settings.py] 1 """ 2 Django settings for oeplatform project. 3 4 Generated by 'django-admin startproject' using Django 1.8.5. 5 6 For more information on this file, see 7 https://docs.djangoproject.com/en/1.8/topics/settings/ 8 9 For the full list of settings and their values, see 10 https://docs.djangoproject.com/en/1.8/ref/settings/ 11 """ 12 13 # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 14 15 try: 16 from .securitysettings import * # noqa 17 except ImportError: 18 import logging 19 import os 20 21 logging.error("No securitysettings found. Triggerd in oeplatform/settings.py") 22 SECRET_KEY = os.environ.get("SECRET_KEY", "0") 23 DEFAULT_FROM_EMAIL = os.environ.get("DEFAULT_FROM_EMAIL") 24 URL = os.environ.get("URL") 25 26 # Quick-start development settings - unsuitable for production 27 # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ 28 29 # Application definition 30 31 INSTALLED_APPS = ( 32 "django.contrib.sites", 33 "django.contrib.admin", 34 "django.contrib.auth", 35 "django.contrib.contenttypes", 36 "django.contrib.sessions", 37 "django.contrib.messages", 38 "django.contrib.staticfiles", 39 "django.contrib.sessions.backends.signed_cookies", 40 "django_bootstrap5", 41 "rest_framework", 42 "rest_framework.authtoken", 43 "modelview", 44 "modelview.templatetags.modelview_extras", 45 "login", 46 "base", 47 "base.templatetags.base_tags", 48 "widget_tweaks", 49 "dataedit", 50 "colorfield", 51 "api", 52 "ontology", 53 "axes", 54 "captcha", 55 "django.contrib.postgres", 56 "fontawesome_5", 57 "django_better_admin_arrayfield", 58 "oeo_viewer", 59 ) 60 61 MIDDLEWARE = ( 62 "django.contrib.sites.middleware.CurrentSiteMiddleware", 63 "django.contrib.sessions.middleware.SessionMiddleware", 64 "django.middleware.common.CommonMiddleware", 65 "django.middleware.csrf.CsrfViewMiddleware", 66 "django.contrib.auth.middleware.AuthenticationMiddleware", 67 "django.contrib.messages.middleware.MessageMiddleware", 68 "django.middleware.clickjacking.XFrameOptionsMiddleware", 69 "django.middleware.security.SecurityMiddleware", 70 "login.middleware.DetachMiddleware", 71 "axes.middleware.AxesMiddleware", 72 "django.middleware.common.CommonMiddleware", 73 ) 74 75 ROOT_URLCONF = "oeplatform.urls" 76 77 EXTERNAL_URLS = { 78 "tutorials_index": "https://openenergyplatform.github.io/academy/", 79 "tutorials_faq": "https://openenergyplatform.github.io/academy/", 80 "tutorials_api1": 
"https://openenergyplatform.github.io/academy/tutorials/api/OEP_API_tutorial_part1/", # noqa E501 81 "tutorials_licenses": "https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/", 82 # noqa E501 83 "readthedocs": "https://oeplatform.readthedocs.io/en/latest/?badge=latest", 84 "compendium": "https://openenergyplatform.github.io/organisation/", 85 } 86 87 88 def external_urls_context_processor(request): 89 """Define hard coded external urls here. 90 Use in templates like this: {{ EXTERNAL_URLS.<name_of_url> }} 91 Also, you may want to add an icon indicating external links, e.g. 92 """ 93 return {"EXTERNAL_URLS": EXTERNAL_URLS} 94 95 96 SITE_ID = 1 97 98 TEMPLATES = [ 99 { 100 "BACKEND": "django.template.backends.django.DjangoTemplates", 101 "DIRS": [], 102 "APP_DIRS": True, 103 "OPTIONS": { 104 "context_processors": [ 105 "django.template.context_processors.debug", 106 "django.template.context_processors.request", 107 "django.contrib.auth.context_processors.auth", 108 "django.contrib.messages.context_processors.messages", 109 "oeplatform.settings.external_urls_context_processor", 110 ] 111 }, 112 } 113 ] 114 115 CORS_ORIGIN_WHITELIST = ["http://localhost:3000", "http://127.0.0.1:3000"] 116 117 GRAPHENE = {"SCHEMA": "factsheet.schema.schema"} 118 119 WSGI_APPLICATION = "oeplatform.wsgi.application" 120 121 try: 122 ONTOLOGY_FOLDER # noqa 123 except NameError: 124 ONTOLOGY_FOLDER = "/tmp" 125 126 # Internationalization 127 # https://docs.djangoproject.com/en/1.8/topics/i18n/ 128 129 LANGUAGE_CODE = "en-us" 130 131 TIME_ZONE = "Europe/Berlin" 132 133 USE_I18N = True 134 135 USE_L10N = True 136 137 USE_TZ = True 138 139 # Static files (CSS, JavaScript, Images) 140 # https://docs.djangoproject.com/en/1.8/howto/static-files/ 141 142 AUTH_USER_MODEL = "login.myuser" 143 LOGIN_URL = "/user/login" 144 LOGIN_REDIRECT_URL = "/" 145 146 REST_FRAMEWORK = { 147 "DEFAULT_AUTHENTICATION_CLASSES": ( 148 "rest_framework.authentication.BasicAuthentication", 149 "rest_framework.authentication.SessionAuthentication", 150 "rest_framework.authentication.TokenAuthentication", 151 ) 152 } 153 154 AUTHENTICATION_BACKENDS = [ 155 # AxesBackend should be the first backend in the AUTHENTICATION_BACKENDS list. 156 "axes.backends.AxesBackend", 157 # custom class extenging Django ModelBackend for login with username OR email 158 "login.backends.ModelBackendWithEmail", 159 ] 160 161 DEFAULT_AUTO_FIELD = "django.db.models.AutoField" 162 [end of oeplatform/settings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/oeplatform/settings.py b/oeplatform/settings.py --- a/oeplatform/settings.py +++ b/oeplatform/settings.py @@ -56,6 +56,7 @@ "fontawesome_5", "django_better_admin_arrayfield", "oeo_viewer", + "compressor", ) MIDDLEWARE = ( @@ -159,3 +160,12 @@ ] DEFAULT_AUTO_FIELD = "django.db.models.AutoField" + +STATICFILES_FINDERS = { + 'django.contrib.staticfiles.finders.FileSystemFinder', + 'django.contrib.staticfiles.finders.AppDirectoriesFinder', + 'compressor.finders.CompressorFinder', +} + +COMPRESS_ENABLED = True +COMPRESS_OFFLINE = True
{"golden_diff": "diff --git a/oeplatform/settings.py b/oeplatform/settings.py\n--- a/oeplatform/settings.py\n+++ b/oeplatform/settings.py\n@@ -56,6 +56,7 @@\n \"fontawesome_5\",\n \"django_better_admin_arrayfield\",\n \"oeo_viewer\",\n+ \"compressor\",\n )\n \n MIDDLEWARE = (\n@@ -159,3 +160,12 @@\n ]\n \n DEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n+\n+STATICFILES_FINDERS = {\n+ 'django.contrib.staticfiles.finders.FileSystemFinder',\n+ 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n+ 'compressor.finders.CompressorFinder',\n+}\n+\n+COMPRESS_ENABLED = True\n+COMPRESS_OFFLINE = True\n", "issue": "Inconvenience of client cache update upon releasing/deploy new versions\n## Description of the issue\r\n\r\nWhen deploying a new version of an application, users are inconvenienced as they need to manually refresh the client cache to see the latest changes.\r\n\r\n## Ideas of solution\r\n\r\nChatGPT suggested to introduce a Cache Busting Mechanism. We could use this existing lib [django-compressor](https://github.com/django-compressor/django-compressor).\r\n\r\n> To set up a cache-busting mechanism in your Django application, you can use the Django extension module \"django-compressor\". Django Compressor allows you to compress static resources such as CSS and JavaScript files and assign them a unique hash value based on their content. This automatically changes the URL of the resources whenever they are modified.\r\n\r\n## Workflow checklist\r\n- [x] I am aware of the workflow in [CONTRIBUTING.md](https://github.com/OpenEnergyPlatform/oeplatform/blob/develop/CONTRIBUTING.md)\r\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for oeplatform project.\n\nGenerated by 'django-admin startproject' using Django 1.8.5.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n\ntry:\n from .securitysettings import * # noqa\nexcept ImportError:\n import logging\n import os\n\n logging.error(\"No securitysettings found. 
Triggerd in oeplatform/settings.py\")\n SECRET_KEY = os.environ.get(\"SECRET_KEY\", \"0\")\n DEFAULT_FROM_EMAIL = os.environ.get(\"DEFAULT_FROM_EMAIL\")\n URL = os.environ.get(\"URL\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/\n\n# Application definition\n\nINSTALLED_APPS = (\n \"django.contrib.sites\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sessions.backends.signed_cookies\",\n \"django_bootstrap5\",\n \"rest_framework\",\n \"rest_framework.authtoken\",\n \"modelview\",\n \"modelview.templatetags.modelview_extras\",\n \"login\",\n \"base\",\n \"base.templatetags.base_tags\",\n \"widget_tweaks\",\n \"dataedit\",\n \"colorfield\",\n \"api\",\n \"ontology\",\n \"axes\",\n \"captcha\",\n \"django.contrib.postgres\",\n \"fontawesome_5\",\n \"django_better_admin_arrayfield\",\n \"oeo_viewer\",\n)\n\nMIDDLEWARE = (\n \"django.contrib.sites.middleware.CurrentSiteMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"login.middleware.DetachMiddleware\",\n \"axes.middleware.AxesMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n)\n\nROOT_URLCONF = \"oeplatform.urls\"\n\nEXTERNAL_URLS = {\n \"tutorials_index\": \"https://openenergyplatform.github.io/academy/\",\n \"tutorials_faq\": \"https://openenergyplatform.github.io/academy/\",\n \"tutorials_api1\": \"https://openenergyplatform.github.io/academy/tutorials/api/OEP_API_tutorial_part1/\", # noqa E501\n \"tutorials_licenses\": \"https://openenergyplatform.github.io/academy/tutorials/metadata/tutorial_open-data-licenses/\",\n # noqa E501\n \"readthedocs\": \"https://oeplatform.readthedocs.io/en/latest/?badge=latest\",\n \"compendium\": \"https://openenergyplatform.github.io/organisation/\",\n}\n\n\ndef external_urls_context_processor(request):\n \"\"\"Define hard coded external urls here.\n Use in templates like this: {{ EXTERNAL_URLS.<name_of_url> }}\n Also, you may want to add an icon indicating external links, e.g.\n \"\"\"\n return {\"EXTERNAL_URLS\": EXTERNAL_URLS}\n\n\nSITE_ID = 1\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"oeplatform.settings.external_urls_context_processor\",\n ]\n },\n }\n]\n\nCORS_ORIGIN_WHITELIST = [\"http://localhost:3000\", \"http://127.0.0.1:3000\"]\n\nGRAPHENE = {\"SCHEMA\": \"factsheet.schema.schema\"}\n\nWSGI_APPLICATION = \"oeplatform.wsgi.application\"\n\ntry:\n ONTOLOGY_FOLDER # noqa\nexcept NameError:\n ONTOLOGY_FOLDER = \"/tmp\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"Europe/Berlin\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, 
JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nAUTH_USER_MODEL = \"login.myuser\"\nLOGIN_URL = \"/user/login\"\nLOGIN_REDIRECT_URL = \"/\"\n\nREST_FRAMEWORK = {\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n \"rest_framework.authentication.TokenAuthentication\",\n )\n}\n\nAUTHENTICATION_BACKENDS = [\n # AxesBackend should be the first backend in the AUTHENTICATION_BACKENDS list.\n \"axes.backends.AxesBackend\",\n # custom class extenging Django ModelBackend for login with username OR email\n \"login.backends.ModelBackendWithEmail\",\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n", "path": "oeplatform/settings.py"}]}
2,264
168
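A settings-side sketch of what the compressor change above amounts to in practice. This is a fragment for `oeplatform/settings.py`, not a standalone script; the finders are written as a tuple (the form Django's documentation uses), and the template snippet in the comments is an assumed example rather than code from the repository:

```python
# "compressor" must also be listed in INSTALLED_APPS, as the diff above does.
STATICFILES_FINDERS = (
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
    "compressor.finders.CompressorFinder",  # required so compressor output is served
)

COMPRESS_ENABLED = True
COMPRESS_OFFLINE = True  # bundles are pre-built at deploy time via `manage.py compress`

# Templates then opt in per asset group, e.g.:
#   {% load compress %}
#   {% compress js %}<script src="{% static 'js/main.js' %}"></script>{% endcompress %}
# The generated bundle lands under STATIC_ROOT/CACHE/ with a content hash in its
# filename, so a release that changes the assets also changes the URL clients fetch.
```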
gh_patches_debug_25638
rasdani/github-patches
git_diff
beetbox__beets-1138
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> smartplaylist: Respect sort terms in queries It would be nice to be able sort items in smart playlists, like so: ``` smartplaylist: relative_to: ~/music playlist_dir: ~/.mpd/playlists playlists: - name: '2014.m3u' query: 'year:2014 added+' - name: 'imported-%time{$added,%Y-%m}.m3u' query: "added:: path+" ``` I'm unfamiliar with the code but it looks like the smartplaylist plugin [discards the query `sort` field](https://github.com/sampsyo/beets/blob/master/beetsplug/smartplaylist.py#L45) when building playlists. Would it be possible to keep the custom sort? </issue> <code> [start of beetsplug/smartplaylist.py] 1 # This file is part of beets. 2 # Copyright 2013, Dang Mai <[email protected]>. 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining 5 # a copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, 8 # distribute, sublicense, and/or sell copies of the Software, and to 9 # permit persons to whom the Software is furnished to do so, subject to 10 # the following conditions: 11 # 12 # The above copyright notice and this permission notice shall be 13 # included in all copies or substantial portions of the Software. 14 15 """Generates smart playlists based on beets queries. 16 """ 17 from __future__ import print_function 18 19 from beets.plugins import BeetsPlugin 20 from beets import config, ui, library 21 from beets import dbcore 22 from beets.util import normpath, syspath 23 import os 24 25 # Global variable so that smartplaylist can detect database changes and run 26 # only once before beets exits. 27 database_changed = False 28 29 30 def _items_for_query(lib, playlist, album=False): 31 """Get the matching items for a playlist's configured queries. 32 `album` indicates whether to process the item-level query or the 33 album-level query (if any). 34 """ 35 key = 'album_query' if album else 'query' 36 if key not in playlist: 37 return [] 38 39 # Parse quer(ies). If it's a list, join the queries with OR. 40 query_strings = playlist[key] 41 if not isinstance(query_strings, (list, tuple)): 42 query_strings = [query_strings] 43 model = library.Album if album else library.Item 44 query = dbcore.OrQuery( 45 [library.parse_query_string(q, model)[0] for q in query_strings] 46 ) 47 48 # Execute query, depending on type. 49 if album: 50 result = [] 51 for album in lib.albums(query): 52 result.extend(album.items()) 53 return result 54 else: 55 return lib.items(query) 56 57 58 def update_playlists(lib): 59 ui.print_("Updating smart playlists...") 60 playlists = config['smartplaylist']['playlists'].get(list) 61 playlist_dir = config['smartplaylist']['playlist_dir'].as_filename() 62 relative_to = config['smartplaylist']['relative_to'].get() 63 if relative_to: 64 relative_to = normpath(relative_to) 65 66 for playlist in playlists: 67 items = [] 68 items.extend(_items_for_query(lib, playlist, True)) 69 items.extend(_items_for_query(lib, playlist, False)) 70 71 m3us = {} 72 basename = playlist['name'].encode('utf8') 73 # As we allow tags in the m3u names, we'll need to iterate through 74 # the items and generate the correct m3u file names. 
75 for item in items: 76 m3u_name = item.evaluate_template(basename, True) 77 if not (m3u_name in m3us): 78 m3us[m3u_name] = [] 79 item_path = item.path 80 if relative_to: 81 item_path = os.path.relpath(item.path, relative_to) 82 if item_path not in m3us[m3u_name]: 83 m3us[m3u_name].append(item_path) 84 # Now iterate through the m3us that we need to generate 85 for m3u in m3us: 86 m3u_path = normpath(os.path.join(playlist_dir, m3u)) 87 with open(syspath(m3u_path), 'w') as f: 88 for path in m3us[m3u]: 89 f.write(path + '\n') 90 ui.print_("... Done") 91 92 93 class SmartPlaylistPlugin(BeetsPlugin): 94 def __init__(self): 95 super(SmartPlaylistPlugin, self).__init__() 96 self.config.add({ 97 'relative_to': None, 98 'playlist_dir': u'.', 99 'auto': True, 100 'playlists': [] 101 }) 102 103 def commands(self): 104 def update(lib, opts, args): 105 update_playlists(lib) 106 spl_update = ui.Subcommand('splupdate', 107 help='update the smart playlists') 108 spl_update.func = update 109 return [spl_update] 110 111 112 @SmartPlaylistPlugin.listen('database_change') 113 def handle_change(lib): 114 global database_changed 115 database_changed = True 116 117 118 @SmartPlaylistPlugin.listen('cli_exit') 119 def update(lib): 120 auto = config['smartplaylist']['auto'] 121 if database_changed and auto: 122 update_playlists(lib) 123 [end of beetsplug/smartplaylist.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/beetsplug/smartplaylist.py b/beetsplug/smartplaylist.py --- a/beetsplug/smartplaylist.py +++ b/beetsplug/smartplaylist.py @@ -18,7 +18,6 @@ from beets.plugins import BeetsPlugin from beets import config, ui, library -from beets import dbcore from beets.util import normpath, syspath import os @@ -36,23 +35,21 @@ if key not in playlist: return [] - # Parse quer(ies). If it's a list, join the queries with OR. + # Parse quer(ies). If it's a list, perform the queries and manually + # concatenate the results query_strings = playlist[key] if not isinstance(query_strings, (list, tuple)): query_strings = [query_strings] model = library.Album if album else library.Item - query = dbcore.OrQuery( - [library.parse_query_string(q, model)[0] for q in query_strings] - ) - - # Execute query, depending on type. - if album: - result = [] - for album in lib.albums(query): - result.extend(album.items()) - return result - else: - return lib.items(query) + results = [] + for q in query_strings: + querystr, sort = library.parse_query_string(q, model) + if album: + new = lib.albums(querystr, sort) + else: + new = lib.items(querystr, sort) + results.extend(new) + return results def update_playlists(lib):
{"golden_diff": "diff --git a/beetsplug/smartplaylist.py b/beetsplug/smartplaylist.py\n--- a/beetsplug/smartplaylist.py\n+++ b/beetsplug/smartplaylist.py\n@@ -18,7 +18,6 @@\n \n from beets.plugins import BeetsPlugin\n from beets import config, ui, library\n-from beets import dbcore\n from beets.util import normpath, syspath\n import os\n \n@@ -36,23 +35,21 @@\n if key not in playlist:\n return []\n \n- # Parse quer(ies). If it's a list, join the queries with OR.\n+ # Parse quer(ies). If it's a list, perform the queries and manually\n+ # concatenate the results\n query_strings = playlist[key]\n if not isinstance(query_strings, (list, tuple)):\n query_strings = [query_strings]\n model = library.Album if album else library.Item\n- query = dbcore.OrQuery(\n- [library.parse_query_string(q, model)[0] for q in query_strings]\n- )\n-\n- # Execute query, depending on type.\n- if album:\n- result = []\n- for album in lib.albums(query):\n- result.extend(album.items())\n- return result\n- else:\n- return lib.items(query)\n+ results = []\n+ for q in query_strings:\n+ querystr, sort = library.parse_query_string(q, model)\n+ if album:\n+ new = lib.albums(querystr, sort)\n+ else:\n+ new = lib.items(querystr, sort)\n+ results.extend(new)\n+ return results\n \n \n def update_playlists(lib):\n", "issue": "smartplaylist: Respect sort terms in queries\nIt would be nice to be able sort items in smart playlists, like so:\n\n```\nsmartplaylist:\n relative_to: ~/music\n playlist_dir: ~/.mpd/playlists\n playlists:\n - name: '2014.m3u'\n query: 'year:2014 added+'\n - name: 'imported-%time{$added,%Y-%m}.m3u'\n query: \"added:: path+\"\n```\n\nI'm unfamiliar with the code but it looks like the smartplaylist plugin [discards the query `sort` field](https://github.com/sampsyo/beets/blob/master/beetsplug/smartplaylist.py#L45) when building playlists. Would it be possible to keep the custom sort?\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2013, Dang Mai <[email protected]>.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Generates smart playlists based on beets queries.\n\"\"\"\nfrom __future__ import print_function\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import config, ui, library\nfrom beets import dbcore\nfrom beets.util import normpath, syspath\nimport os\n\n# Global variable so that smartplaylist can detect database changes and run\n# only once before beets exits.\ndatabase_changed = False\n\n\ndef _items_for_query(lib, playlist, album=False):\n \"\"\"Get the matching items for a playlist's configured queries.\n `album` indicates whether to process the item-level query or the\n album-level query (if any).\n \"\"\"\n key = 'album_query' if album else 'query'\n if key not in playlist:\n return []\n\n # Parse quer(ies). 
If it's a list, join the queries with OR.\n query_strings = playlist[key]\n if not isinstance(query_strings, (list, tuple)):\n query_strings = [query_strings]\n model = library.Album if album else library.Item\n query = dbcore.OrQuery(\n [library.parse_query_string(q, model)[0] for q in query_strings]\n )\n\n # Execute query, depending on type.\n if album:\n result = []\n for album in lib.albums(query):\n result.extend(album.items())\n return result\n else:\n return lib.items(query)\n\n\ndef update_playlists(lib):\n ui.print_(\"Updating smart playlists...\")\n playlists = config['smartplaylist']['playlists'].get(list)\n playlist_dir = config['smartplaylist']['playlist_dir'].as_filename()\n relative_to = config['smartplaylist']['relative_to'].get()\n if relative_to:\n relative_to = normpath(relative_to)\n\n for playlist in playlists:\n items = []\n items.extend(_items_for_query(lib, playlist, True))\n items.extend(_items_for_query(lib, playlist, False))\n\n m3us = {}\n basename = playlist['name'].encode('utf8')\n # As we allow tags in the m3u names, we'll need to iterate through\n # the items and generate the correct m3u file names.\n for item in items:\n m3u_name = item.evaluate_template(basename, True)\n if not (m3u_name in m3us):\n m3us[m3u_name] = []\n item_path = item.path\n if relative_to:\n item_path = os.path.relpath(item.path, relative_to)\n if item_path not in m3us[m3u_name]:\n m3us[m3u_name].append(item_path)\n # Now iterate through the m3us that we need to generate\n for m3u in m3us:\n m3u_path = normpath(os.path.join(playlist_dir, m3u))\n with open(syspath(m3u_path), 'w') as f:\n for path in m3us[m3u]:\n f.write(path + '\\n')\n ui.print_(\"... Done\")\n\n\nclass SmartPlaylistPlugin(BeetsPlugin):\n def __init__(self):\n super(SmartPlaylistPlugin, self).__init__()\n self.config.add({\n 'relative_to': None,\n 'playlist_dir': u'.',\n 'auto': True,\n 'playlists': []\n })\n\n def commands(self):\n def update(lib, opts, args):\n update_playlists(lib)\n spl_update = ui.Subcommand('splupdate',\n help='update the smart playlists')\n spl_update.func = update\n return [spl_update]\n\n\[email protected]('database_change')\ndef handle_change(lib):\n global database_changed\n database_changed = True\n\n\[email protected]('cli_exit')\ndef update(lib):\n auto = config['smartplaylist']['auto']\n if database_changed and auto:\n update_playlists(lib)\n", "path": "beetsplug/smartplaylist.py"}]}
1,940
367
gh_patches_debug_15368
rasdani/github-patches
git_diff
netbox-community__netbox-4850
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Swagger doesn't show required field "prefix_length" in available-prefixes POST request <!-- Please indicate the nature of the change by placing an X in one of the boxes below. --> ### Change Type [X ] Addition [ ] Correction [ ] Deprecation [ ] Cleanup (formatting, typos, etc.) ### Environment NetBox version: 2.5.13 <!-- Describe the proposed change(s). --> ### Proposed Change Add `prefix_length` field in Swagger docs, something like: prefix_length* integer title: Prefix length Size of this prefix In /ipam/prefixes/{id}/available-prefixes/ POST request ![prefixes](https://user-images.githubusercontent.com/20525775/58911018-397cf980-8717-11e9-9964-d82a805f0778.png) </issue> <code> [start of netbox/ipam/api/views.py] 1 from django.conf import settings 2 from django.db.models import Count 3 from django.shortcuts import get_object_or_404 4 from django_pglocks import advisory_lock 5 from drf_yasg.utils import swagger_auto_schema 6 from rest_framework import status 7 from rest_framework.decorators import action 8 from rest_framework.exceptions import PermissionDenied 9 from rest_framework.response import Response 10 11 from extras.api.views import CustomFieldModelViewSet 12 from ipam import filters 13 from ipam.models import Aggregate, IPAddress, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF 14 from utilities.api import ModelViewSet 15 from utilities.constants import ADVISORY_LOCK_KEYS 16 from utilities.utils import get_subquery 17 from . import serializers 18 19 20 # 21 # VRFs 22 # 23 24 class VRFViewSet(CustomFieldModelViewSet): 25 queryset = VRF.objects.prefetch_related('tenant').prefetch_related('tags').annotate( 26 ipaddress_count=get_subquery(IPAddress, 'vrf'), 27 prefix_count=get_subquery(Prefix, 'vrf') 28 ) 29 serializer_class = serializers.VRFSerializer 30 filterset_class = filters.VRFFilterSet 31 32 33 # 34 # RIRs 35 # 36 37 class RIRViewSet(ModelViewSet): 38 queryset = RIR.objects.annotate( 39 aggregate_count=Count('aggregates') 40 ) 41 serializer_class = serializers.RIRSerializer 42 filterset_class = filters.RIRFilterSet 43 44 45 # 46 # Aggregates 47 # 48 49 class AggregateViewSet(CustomFieldModelViewSet): 50 queryset = Aggregate.objects.prefetch_related('rir').prefetch_related('tags') 51 serializer_class = serializers.AggregateSerializer 52 filterset_class = filters.AggregateFilterSet 53 54 55 # 56 # Roles 57 # 58 59 class RoleViewSet(ModelViewSet): 60 queryset = Role.objects.annotate( 61 prefix_count=get_subquery(Prefix, 'role'), 62 vlan_count=get_subquery(VLAN, 'role') 63 ) 64 serializer_class = serializers.RoleSerializer 65 filterset_class = filters.RoleFilterSet 66 67 68 # 69 # Prefixes 70 # 71 72 class PrefixViewSet(CustomFieldModelViewSet): 73 queryset = Prefix.objects.prefetch_related('site', 'vrf__tenant', 'tenant', 'vlan', 'role', 'tags') 74 serializer_class = serializers.PrefixSerializer 75 filterset_class = filters.PrefixFilterSet 76 77 @swagger_auto_schema(method='get', responses={200: serializers.AvailablePrefixSerializer(many=True)}) 78 @swagger_auto_schema(method='post', responses={201: serializers.AvailablePrefixSerializer(many=True)}) 79 @action(detail=True, url_path='available-prefixes', methods=['get', 'post']) 80 @advisory_lock(ADVISORY_LOCK_KEYS['available-prefixes']) 81 def available_prefixes(self, request, pk=None): 82 """ 83 A convenience method for returning available child prefixes within a parent. 
84 85 The advisory lock decorator uses a PostgreSQL advisory lock to prevent this API from being 86 invoked in parallel, which results in a race condition where multiple insertions can occur. 87 """ 88 prefix = get_object_or_404(Prefix, pk=pk) 89 available_prefixes = prefix.get_available_prefixes() 90 91 if request.method == 'POST': 92 93 # Validate Requested Prefixes' length 94 serializer = serializers.PrefixLengthSerializer( 95 data=request.data if isinstance(request.data, list) else [request.data], 96 many=True, 97 context={ 98 'request': request, 99 'prefix': prefix, 100 } 101 ) 102 if not serializer.is_valid(): 103 return Response( 104 serializer.errors, 105 status=status.HTTP_400_BAD_REQUEST 106 ) 107 108 requested_prefixes = serializer.validated_data 109 # Allocate prefixes to the requested objects based on availability within the parent 110 for i, requested_prefix in enumerate(requested_prefixes): 111 112 # Find the first available prefix equal to or larger than the requested size 113 for available_prefix in available_prefixes.iter_cidrs(): 114 if requested_prefix['prefix_length'] >= available_prefix.prefixlen: 115 allocated_prefix = '{}/{}'.format(available_prefix.network, requested_prefix['prefix_length']) 116 requested_prefix['prefix'] = allocated_prefix 117 requested_prefix['vrf'] = prefix.vrf.pk if prefix.vrf else None 118 break 119 else: 120 return Response( 121 { 122 "detail": "Insufficient space is available to accommodate the requested prefix size(s)" 123 }, 124 status=status.HTTP_204_NO_CONTENT 125 ) 126 127 # Remove the allocated prefix from the list of available prefixes 128 available_prefixes.remove(allocated_prefix) 129 130 # Initialize the serializer with a list or a single object depending on what was requested 131 context = {'request': request} 132 if isinstance(request.data, list): 133 serializer = serializers.PrefixSerializer(data=requested_prefixes, many=True, context=context) 134 else: 135 serializer = serializers.PrefixSerializer(data=requested_prefixes[0], context=context) 136 137 # Create the new Prefix(es) 138 if serializer.is_valid(): 139 serializer.save() 140 return Response(serializer.data, status=status.HTTP_201_CREATED) 141 142 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) 143 144 else: 145 146 serializer = serializers.AvailablePrefixSerializer(available_prefixes.iter_cidrs(), many=True, context={ 147 'request': request, 148 'vrf': prefix.vrf, 149 }) 150 151 return Response(serializer.data) 152 153 @swagger_auto_schema(method='get', responses={200: serializers.AvailableIPSerializer(many=True)}) 154 @swagger_auto_schema(method='post', responses={201: serializers.AvailableIPSerializer(many=True)}, 155 request_body=serializers.AvailableIPSerializer(many=False)) 156 @action(detail=True, url_path='available-ips', methods=['get', 'post'], queryset=IPAddress.objects.all()) 157 @advisory_lock(ADVISORY_LOCK_KEYS['available-ips']) 158 def available_ips(self, request, pk=None): 159 """ 160 A convenience method for returning available IP addresses within a prefix. By default, the number of IPs 161 returned will be equivalent to PAGINATE_COUNT. An arbitrary limit (up to MAX_PAGE_SIZE, if set) may be passed, 162 however results will not be paginated. 163 164 The advisory lock decorator uses a PostgreSQL advisory lock to prevent this API from being 165 invoked in parallel, which results in a race condition where multiple insertions can occur. 
166 """ 167 prefix = get_object_or_404(Prefix, pk=pk) 168 169 # Create the next available IP within the prefix 170 if request.method == 'POST': 171 172 # Normalize to a list of objects 173 requested_ips = request.data if isinstance(request.data, list) else [request.data] 174 175 # Determine if the requested number of IPs is available 176 available_ips = prefix.get_available_ips() 177 if available_ips.size < len(requested_ips): 178 return Response( 179 { 180 "detail": "An insufficient number of IP addresses are available within the prefix {} ({} " 181 "requested, {} available)".format(prefix, len(requested_ips), len(available_ips)) 182 }, 183 status=status.HTTP_204_NO_CONTENT 184 ) 185 186 # Assign addresses from the list of available IPs and copy VRF assignment from the parent prefix 187 available_ips = iter(available_ips) 188 prefix_length = prefix.prefix.prefixlen 189 for requested_ip in requested_ips: 190 requested_ip['address'] = '{}/{}'.format(next(available_ips), prefix_length) 191 requested_ip['vrf'] = prefix.vrf.pk if prefix.vrf else None 192 193 # Initialize the serializer with a list or a single object depending on what was requested 194 context = {'request': request} 195 if isinstance(request.data, list): 196 serializer = serializers.IPAddressSerializer(data=requested_ips, many=True, context=context) 197 else: 198 serializer = serializers.IPAddressSerializer(data=requested_ips[0], context=context) 199 200 # Create the new IP address(es) 201 if serializer.is_valid(): 202 serializer.save() 203 return Response(serializer.data, status=status.HTTP_201_CREATED) 204 205 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) 206 207 # Determine the maximum number of IPs to return 208 else: 209 try: 210 limit = int(request.query_params.get('limit', settings.PAGINATE_COUNT)) 211 except ValueError: 212 limit = settings.PAGINATE_COUNT 213 if settings.MAX_PAGE_SIZE: 214 limit = min(limit, settings.MAX_PAGE_SIZE) 215 216 # Calculate available IPs within the prefix 217 ip_list = [] 218 for index, ip in enumerate(prefix.get_available_ips(), start=1): 219 ip_list.append(ip) 220 if index == limit: 221 break 222 serializer = serializers.AvailableIPSerializer(ip_list, many=True, context={ 223 'request': request, 224 'prefix': prefix.prefix, 225 'vrf': prefix.vrf, 226 }) 227 228 return Response(serializer.data) 229 230 231 # 232 # IP addresses 233 # 234 235 class IPAddressViewSet(CustomFieldModelViewSet): 236 queryset = IPAddress.objects.prefetch_related( 237 'vrf__tenant', 'tenant', 'nat_inside', 'interface__device__device_type', 'interface__virtual_machine', 238 'nat_outside', 'tags', 239 ) 240 serializer_class = serializers.IPAddressSerializer 241 filterset_class = filters.IPAddressFilterSet 242 243 244 # 245 # VLAN groups 246 # 247 248 class VLANGroupViewSet(ModelViewSet): 249 queryset = VLANGroup.objects.prefetch_related('site').annotate( 250 vlan_count=Count('vlans') 251 ) 252 serializer_class = serializers.VLANGroupSerializer 253 filterset_class = filters.VLANGroupFilterSet 254 255 256 # 257 # VLANs 258 # 259 260 class VLANViewSet(CustomFieldModelViewSet): 261 queryset = VLAN.objects.prefetch_related( 262 'site', 'group', 'tenant', 'role', 'tags' 263 ).annotate( 264 prefix_count=get_subquery(Prefix, 'vlan') 265 ) 266 serializer_class = serializers.VLANSerializer 267 filterset_class = filters.VLANFilterSet 268 269 270 # 271 # Services 272 # 273 274 class ServiceViewSet(ModelViewSet): 275 queryset = Service.objects.prefetch_related('device').prefetch_related('tags') 276 
serializer_class = serializers.ServiceSerializer 277 filterset_class = filters.ServiceFilterSet 278 [end of netbox/ipam/api/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netbox/ipam/api/views.py b/netbox/ipam/api/views.py --- a/netbox/ipam/api/views.py +++ b/netbox/ipam/api/views.py @@ -74,6 +74,11 @@ serializer_class = serializers.PrefixSerializer filterset_class = filters.PrefixFilterSet + def get_serializer_class(self): + if self.action == "available_prefixes" and self.request.method == "POST": + return serializers.PrefixLengthSerializer + return super().get_serializer_class() + @swagger_auto_schema(method='get', responses={200: serializers.AvailablePrefixSerializer(many=True)}) @swagger_auto_schema(method='post', responses={201: serializers.AvailablePrefixSerializer(many=True)}) @action(detail=True, url_path='available-prefixes', methods=['get', 'post'])
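The golden diff above closes the Swagger gap by overriding `get_serializer_class()` so that the POST body of the `available-prefixes` action is documented with `PrefixLengthSerializer` instead of the full `PrefixSerializer`. The sketch below is a plain-Python stand-in for that dispatch rule, assuming illustrative class and field names rather than the real NetBox/DRF serializers; drf-yasg derives the request-body schema from whatever class the view returns here, which is why `prefix_length` then appears as a required field in the generated docs.

```python
# Illustrative stand-ins only; the real classes are DRF serializers in netbox/ipam/api/serializers.py.
class PrefixSerializer:
    fields = ["prefix", "vrf", "tenant"]          # full Prefix representation

class PrefixLengthSerializer:
    fields = ["prefix_length"]                    # only the size of the requested child prefix

def get_serializer_class(action: str, method: str):
    """Mirrors the decision the patch adds to PrefixViewSet."""
    if action == "available_prefixes" and method == "POST":
        return PrefixLengthSerializer
    return PrefixSerializer

# The schema generator asks the view per action/method, so the POST request body
# now advertises prefix_length while GET responses keep the normal serializer.
assert get_serializer_class("available_prefixes", "POST") is PrefixLengthSerializer
assert get_serializer_class("available_prefixes", "GET") is PrefixSerializer
```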
{"golden_diff": "diff --git a/netbox/ipam/api/views.py b/netbox/ipam/api/views.py\n--- a/netbox/ipam/api/views.py\n+++ b/netbox/ipam/api/views.py\n@@ -74,6 +74,11 @@\n serializer_class = serializers.PrefixSerializer\n filterset_class = filters.PrefixFilterSet\n \n+ def get_serializer_class(self):\n+ if self.action == \"available_prefixes\" and self.request.method == \"POST\":\n+ return serializers.PrefixLengthSerializer\n+ return super().get_serializer_class()\n+\n @swagger_auto_schema(method='get', responses={200: serializers.AvailablePrefixSerializer(many=True)})\n @swagger_auto_schema(method='post', responses={201: serializers.AvailablePrefixSerializer(many=True)})\n @action(detail=True, url_path='available-prefixes', methods=['get', 'post'])\n", "issue": "Swagger doesn't show required field \"prefix_length\" in available-prefixes POST request\n<!--\r\n Please indicate the nature of the change by placing an X in one of the\r\n boxes below.\r\n-->\r\n### Change Type\r\n[X ] Addition\r\n[ ] Correction\r\n[ ] Deprecation\r\n[ ] Cleanup (formatting, typos, etc.)\r\n\r\n### Environment\r\nNetBox version: 2.5.13\r\n\r\n<!-- Describe the proposed change(s). -->\r\n### Proposed Change\r\n\r\nAdd `prefix_length` field in Swagger docs, something like:\r\n\r\nprefix_length* integer\r\ntitle: Prefix length\r\nSize of this prefix \r\n\r\nIn /ipam/prefixes/{id}/available-prefixes/ POST request\r\n\r\n![prefixes](https://user-images.githubusercontent.com/20525775/58911018-397cf980-8717-11e9-9964-d82a805f0778.png)\r\n\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.db.models import Count\nfrom django.shortcuts import get_object_or_404\nfrom django_pglocks import advisory_lock\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.response import Response\n\nfrom extras.api.views import CustomFieldModelViewSet\nfrom ipam import filters\nfrom ipam.models import Aggregate, IPAddress, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF\nfrom utilities.api import ModelViewSet\nfrom utilities.constants import ADVISORY_LOCK_KEYS\nfrom utilities.utils import get_subquery\nfrom . 
import serializers\n\n\n#\n# VRFs\n#\n\nclass VRFViewSet(CustomFieldModelViewSet):\n queryset = VRF.objects.prefetch_related('tenant').prefetch_related('tags').annotate(\n ipaddress_count=get_subquery(IPAddress, 'vrf'),\n prefix_count=get_subquery(Prefix, 'vrf')\n )\n serializer_class = serializers.VRFSerializer\n filterset_class = filters.VRFFilterSet\n\n\n#\n# RIRs\n#\n\nclass RIRViewSet(ModelViewSet):\n queryset = RIR.objects.annotate(\n aggregate_count=Count('aggregates')\n )\n serializer_class = serializers.RIRSerializer\n filterset_class = filters.RIRFilterSet\n\n\n#\n# Aggregates\n#\n\nclass AggregateViewSet(CustomFieldModelViewSet):\n queryset = Aggregate.objects.prefetch_related('rir').prefetch_related('tags')\n serializer_class = serializers.AggregateSerializer\n filterset_class = filters.AggregateFilterSet\n\n\n#\n# Roles\n#\n\nclass RoleViewSet(ModelViewSet):\n queryset = Role.objects.annotate(\n prefix_count=get_subquery(Prefix, 'role'),\n vlan_count=get_subquery(VLAN, 'role')\n )\n serializer_class = serializers.RoleSerializer\n filterset_class = filters.RoleFilterSet\n\n\n#\n# Prefixes\n#\n\nclass PrefixViewSet(CustomFieldModelViewSet):\n queryset = Prefix.objects.prefetch_related('site', 'vrf__tenant', 'tenant', 'vlan', 'role', 'tags')\n serializer_class = serializers.PrefixSerializer\n filterset_class = filters.PrefixFilterSet\n\n @swagger_auto_schema(method='get', responses={200: serializers.AvailablePrefixSerializer(many=True)})\n @swagger_auto_schema(method='post', responses={201: serializers.AvailablePrefixSerializer(many=True)})\n @action(detail=True, url_path='available-prefixes', methods=['get', 'post'])\n @advisory_lock(ADVISORY_LOCK_KEYS['available-prefixes'])\n def available_prefixes(self, request, pk=None):\n \"\"\"\n A convenience method for returning available child prefixes within a parent.\n\n The advisory lock decorator uses a PostgreSQL advisory lock to prevent this API from being\n invoked in parallel, which results in a race condition where multiple insertions can occur.\n \"\"\"\n prefix = get_object_or_404(Prefix, pk=pk)\n available_prefixes = prefix.get_available_prefixes()\n\n if request.method == 'POST':\n\n # Validate Requested Prefixes' length\n serializer = serializers.PrefixLengthSerializer(\n data=request.data if isinstance(request.data, list) else [request.data],\n many=True,\n context={\n 'request': request,\n 'prefix': prefix,\n }\n )\n if not serializer.is_valid():\n return Response(\n serializer.errors,\n status=status.HTTP_400_BAD_REQUEST\n )\n\n requested_prefixes = serializer.validated_data\n # Allocate prefixes to the requested objects based on availability within the parent\n for i, requested_prefix in enumerate(requested_prefixes):\n\n # Find the first available prefix equal to or larger than the requested size\n for available_prefix in available_prefixes.iter_cidrs():\n if requested_prefix['prefix_length'] >= available_prefix.prefixlen:\n allocated_prefix = '{}/{}'.format(available_prefix.network, requested_prefix['prefix_length'])\n requested_prefix['prefix'] = allocated_prefix\n requested_prefix['vrf'] = prefix.vrf.pk if prefix.vrf else None\n break\n else:\n return Response(\n {\n \"detail\": \"Insufficient space is available to accommodate the requested prefix size(s)\"\n },\n status=status.HTTP_204_NO_CONTENT\n )\n\n # Remove the allocated prefix from the list of available prefixes\n available_prefixes.remove(allocated_prefix)\n\n # Initialize the serializer with a list or a single object depending on what was requested\n 
context = {'request': request}\n if isinstance(request.data, list):\n serializer = serializers.PrefixSerializer(data=requested_prefixes, many=True, context=context)\n else:\n serializer = serializers.PrefixSerializer(data=requested_prefixes[0], context=context)\n\n # Create the new Prefix(es)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n else:\n\n serializer = serializers.AvailablePrefixSerializer(available_prefixes.iter_cidrs(), many=True, context={\n 'request': request,\n 'vrf': prefix.vrf,\n })\n\n return Response(serializer.data)\n\n @swagger_auto_schema(method='get', responses={200: serializers.AvailableIPSerializer(many=True)})\n @swagger_auto_schema(method='post', responses={201: serializers.AvailableIPSerializer(many=True)},\n request_body=serializers.AvailableIPSerializer(many=False))\n @action(detail=True, url_path='available-ips', methods=['get', 'post'], queryset=IPAddress.objects.all())\n @advisory_lock(ADVISORY_LOCK_KEYS['available-ips'])\n def available_ips(self, request, pk=None):\n \"\"\"\n A convenience method for returning available IP addresses within a prefix. By default, the number of IPs\n returned will be equivalent to PAGINATE_COUNT. An arbitrary limit (up to MAX_PAGE_SIZE, if set) may be passed,\n however results will not be paginated.\n\n The advisory lock decorator uses a PostgreSQL advisory lock to prevent this API from being\n invoked in parallel, which results in a race condition where multiple insertions can occur.\n \"\"\"\n prefix = get_object_or_404(Prefix, pk=pk)\n\n # Create the next available IP within the prefix\n if request.method == 'POST':\n\n # Normalize to a list of objects\n requested_ips = request.data if isinstance(request.data, list) else [request.data]\n\n # Determine if the requested number of IPs is available\n available_ips = prefix.get_available_ips()\n if available_ips.size < len(requested_ips):\n return Response(\n {\n \"detail\": \"An insufficient number of IP addresses are available within the prefix {} ({} \"\n \"requested, {} available)\".format(prefix, len(requested_ips), len(available_ips))\n },\n status=status.HTTP_204_NO_CONTENT\n )\n\n # Assign addresses from the list of available IPs and copy VRF assignment from the parent prefix\n available_ips = iter(available_ips)\n prefix_length = prefix.prefix.prefixlen\n for requested_ip in requested_ips:\n requested_ip['address'] = '{}/{}'.format(next(available_ips), prefix_length)\n requested_ip['vrf'] = prefix.vrf.pk if prefix.vrf else None\n\n # Initialize the serializer with a list or a single object depending on what was requested\n context = {'request': request}\n if isinstance(request.data, list):\n serializer = serializers.IPAddressSerializer(data=requested_ips, many=True, context=context)\n else:\n serializer = serializers.IPAddressSerializer(data=requested_ips[0], context=context)\n\n # Create the new IP address(es)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n # Determine the maximum number of IPs to return\n else:\n try:\n limit = int(request.query_params.get('limit', settings.PAGINATE_COUNT))\n except ValueError:\n limit = settings.PAGINATE_COUNT\n if settings.MAX_PAGE_SIZE:\n limit = min(limit, settings.MAX_PAGE_SIZE)\n\n # Calculate available IPs within the prefix\n ip_list = []\n 
for index, ip in enumerate(prefix.get_available_ips(), start=1):\n ip_list.append(ip)\n if index == limit:\n break\n serializer = serializers.AvailableIPSerializer(ip_list, many=True, context={\n 'request': request,\n 'prefix': prefix.prefix,\n 'vrf': prefix.vrf,\n })\n\n return Response(serializer.data)\n\n\n#\n# IP addresses\n#\n\nclass IPAddressViewSet(CustomFieldModelViewSet):\n queryset = IPAddress.objects.prefetch_related(\n 'vrf__tenant', 'tenant', 'nat_inside', 'interface__device__device_type', 'interface__virtual_machine',\n 'nat_outside', 'tags',\n )\n serializer_class = serializers.IPAddressSerializer\n filterset_class = filters.IPAddressFilterSet\n\n\n#\n# VLAN groups\n#\n\nclass VLANGroupViewSet(ModelViewSet):\n queryset = VLANGroup.objects.prefetch_related('site').annotate(\n vlan_count=Count('vlans')\n )\n serializer_class = serializers.VLANGroupSerializer\n filterset_class = filters.VLANGroupFilterSet\n\n\n#\n# VLANs\n#\n\nclass VLANViewSet(CustomFieldModelViewSet):\n queryset = VLAN.objects.prefetch_related(\n 'site', 'group', 'tenant', 'role', 'tags'\n ).annotate(\n prefix_count=get_subquery(Prefix, 'vlan')\n )\n serializer_class = serializers.VLANSerializer\n filterset_class = filters.VLANFilterSet\n\n\n#\n# Services\n#\n\nclass ServiceViewSet(ModelViewSet):\n queryset = Service.objects.prefetch_related('device').prefetch_related('tags')\n serializer_class = serializers.ServiceSerializer\n filterset_class = filters.ServiceFilterSet\n", "path": "netbox/ipam/api/views.py"}]}
3,629
187
gh_patches_debug_11338
rasdani/github-patches
git_diff
lutris__lutris-2683
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Commit 9102071 Breaks Gentoo compatibility Using a relative path breaks Gentoo compatibility. Due to Gentoo's ability to have multiple versions of python installed simutaneously, they do some symlink resolution magic that determines which python interpreter should be used for a package based on the constraining flags applied at installation. Because of this lutris script is not located in /usr/bin/lutris. This is a symlink to the resolver that then performs its magic. This causes lutris to fail to be able to call the python-wrapper script. Recommend non-relative path to lutris-wrapper. Commit 9102071 Breaks Gentoo compatibility Using a relative path breaks Gentoo compatibility. Due to Gentoo's ability to have multiple versions of python installed simutaneously, they do some symlink resolution magic that determines which python interpreter should be used for a package based on the constraining flags applied at installation. Because of this lutris script is not located in /usr/bin/lutris. This is a symlink to the resolver that then performs its magic. This causes lutris to fail to be able to call the python-wrapper script. Recommend non-relative path to lutris-wrapper. </issue> <code> [start of lutris/command.py] 1 """Threading module, used to launch games while monitoring them.""" 2 3 import io 4 import os 5 import sys 6 import fcntl 7 import shlex 8 import subprocess 9 import contextlib 10 from textwrap import dedent 11 12 from gi.repository import GLib 13 14 from lutris import settings 15 from lutris import runtime 16 from lutris.util.log import logger 17 from lutris.util import system 18 19 WRAPPER_SCRIPT = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "../share/lutris/bin/lutris-wrapper")) 20 21 22 class MonitoredCommand: 23 """Exexcutes a commmand while keeping track of its state""" 24 25 fallback_cwd = "/tmp" 26 27 def __init__( 28 self, 29 command, 30 runner=None, 31 env=None, 32 term=None, 33 cwd=None, 34 include_processes=None, 35 exclude_processes=None, 36 log_buffer=None, 37 title=None, 38 ): # pylint: disable=too-many-arguments 39 self.ready_state = True 40 self.env = self.get_environment(env) 41 42 self.command = command 43 self.runner = runner 44 self.stop_func = lambda: True 45 self.game_process = None 46 self.prevent_on_stop = False 47 self.return_code = None 48 self.terminal = system.find_executable(term) 49 self.is_running = True 50 self.error = None 51 self.log_handlers = [ 52 self.log_handler_stdout, 53 self.log_handler_console_output, 54 ] 55 self.set_log_buffer(log_buffer) 56 self.stdout_monitor = None 57 self.include_processes = include_processes or [] 58 self.exclude_processes = exclude_processes or [] 59 60 self.cwd = self.get_cwd(cwd) 61 62 self._stdout = io.StringIO() 63 64 self._title = title if title else command[0] 65 66 @property 67 def stdout(self): 68 return self._stdout.getvalue() 69 70 @property 71 def wrapper_command(self): 72 """Return launch arguments for the wrapper script""" 73 74 return [ 75 WRAPPER_SCRIPT, 76 self._title, 77 str(len(self.include_processes)), 78 str(len(self.exclude_processes)), 79 ] + self.include_processes + self.exclude_processes + self.command 80 81 def set_log_buffer(self, log_buffer): 82 """Attach a TextBuffer to this command enables the buffer handler""" 83 if not log_buffer: 84 return 85 self.log_buffer = log_buffer 86 if self.log_handler_buffer not in self.log_handlers: 87 
self.log_handlers.append(self.log_handler_buffer) 88 89 def get_cwd(self, cwd): 90 """Return the current working dir of the game""" 91 if not cwd: 92 cwd = self.runner.working_dir if self.runner else None 93 return os.path.expanduser(cwd or "~") 94 95 @staticmethod 96 def get_environment(user_env): 97 """Process the user provided environment variables for use as self.env""" 98 env = user_env or {} 99 # not clear why this needs to be added, the path is already added in 100 # the wrappper script. 101 env['PYTHONPATH'] = ':'.join(sys.path) 102 # Drop bad values of environment keys, those will confuse the Python 103 # interpreter. 104 return { 105 key: value for key, value in env.items() if "=" not in key 106 } 107 108 def get_child_environment(self): 109 """Returns the calculated environment for the child process.""" 110 env = os.environ.copy() 111 env.update(self.env) 112 return env 113 114 def start(self): 115 """Run the thread.""" 116 logger.debug("Running %s", " ".join(self.wrapper_command)) 117 for key, value in self.env.items(): 118 logger.debug("ENV: %s=\"%s\"", key, value) 119 120 if self.terminal: 121 self.game_process = self.run_in_terminal() 122 else: 123 env = self.get_child_environment() 124 self.game_process = self.execute_process(self.wrapper_command, env) 125 126 if not self.game_process: 127 logger.warning("No game process available") 128 return 129 130 GLib.child_watch_add(self.game_process.pid, self.on_stop) 131 132 # make stdout nonblocking. 133 fileno = self.game_process.stdout.fileno() 134 fcntl.fcntl( 135 fileno, 136 fcntl.F_SETFL, 137 fcntl.fcntl(fileno, fcntl.F_GETFL) | os.O_NONBLOCK 138 ) 139 140 self.stdout_monitor = GLib.io_add_watch( 141 self.game_process.stdout, 142 GLib.IO_IN | GLib.IO_HUP, 143 self.on_stdout_output, 144 ) 145 146 def log_handler_stdout(self, line): 147 """Add the line to this command's stdout attribute""" 148 self._stdout.write(line) 149 150 def log_handler_buffer(self, line): 151 """Add the line to the associated LogBuffer object""" 152 self.log_buffer.insert(self.log_buffer.get_end_iter(), line, -1) 153 154 def log_handler_console_output(self, line): # pylint: disable=no-self-use 155 """Print the line to stdout""" 156 with contextlib.suppress(BlockingIOError): 157 sys.stdout.write(line) 158 sys.stdout.flush() 159 160 def on_stop(self, _pid, returncode): 161 """Callback registered on game process termination""" 162 if self.prevent_on_stop: # stop() already in progress 163 return False 164 165 logger.debug("The process has terminated with code %s", returncode) 166 self.is_running = False 167 self.return_code = returncode 168 169 resume_stop = self.stop() 170 if not resume_stop: 171 logger.info("Full shutdown prevented") 172 return False 173 174 return False 175 176 def on_stdout_output(self, stdout, condition): 177 """Called by the stdout monitor to dispatch output to log handlers""" 178 if condition == GLib.IO_HUP: 179 self.stdout_monitor = None 180 return False 181 if not self.is_running: 182 return False 183 try: 184 line = stdout.read(262144).decode("utf-8", errors="ignore") 185 except ValueError: 186 # file_desc might be closed 187 return True 188 if "winemenubuilder.exe" in line: 189 return True 190 for log_handler in self.log_handlers: 191 log_handler(line) 192 return True 193 194 def run_in_terminal(self): 195 """Write command in a script file and run it. 196 197 Running it from a file is likely the only way to set env vars only 198 for the command (not for the terminal app). 
199 It's also the only reliable way to keep the term open when the 200 game is quit. 201 """ 202 script_path = os.path.join(settings.CACHE_DIR, "run_in_term.sh") 203 exported_environment = "\n".join( 204 'export %s="%s" ' % (key, value) 205 for key, value in self.env.items() 206 ) 207 command = " ".join(['"%s"' % token for token in self.wrapper_command]) 208 with open(script_path, "w") as script_file: 209 script_file.write(dedent( 210 """#!/bin/sh 211 cd "%s" 212 %s 213 exec %s 214 """ % (self.cwd, exported_environment, command) 215 )) 216 os.chmod(script_path, 0o744) 217 return self.execute_process([self.terminal, "-e", script_path]) 218 219 def execute_process(self, command, env=None): 220 """Execute and return a subprocess""" 221 if self.cwd and not system.path_exists(self.cwd): 222 try: 223 os.makedirs(self.cwd) 224 except OSError: 225 logger.error("Failed to create working directory, falling back to %s", 226 self.fallback_cwd) 227 self.cwd = "/tmp" 228 try: 229 230 return subprocess.Popen( 231 command, 232 stdout=subprocess.PIPE, 233 stderr=subprocess.STDOUT, 234 cwd=self.cwd, 235 env=env, 236 ) 237 except OSError as ex: 238 logger.exception("Failed to execute %s: %s", " ".join(command), ex) 239 self.error = ex.strerror 240 241 def stop(self): 242 """Stops the current game process and cleans up the instance""" 243 # Prevent stop() being called again by the process exiting 244 self.prevent_on_stop = True 245 246 try: 247 self.game_process.terminate() 248 except ProcessLookupError: # process already dead. 249 logger.debug("Management process looks dead already.") 250 251 if hasattr(self, "stop_func"): 252 resume_stop = self.stop_func() 253 if not resume_stop: 254 return False 255 256 if self.stdout_monitor: 257 logger.debug("Detaching logger") 258 GLib.source_remove(self.stdout_monitor) 259 self.stdout_monitor = None 260 else: 261 logger.debug("logger already detached") 262 263 self.is_running = False 264 self.ready_state = False 265 return True 266 267 268 def exec_command(command): 269 """Execute arbitrary command in a MonitoredCommand 270 271 Used by the --exec command line flag. 272 """ 273 command = MonitoredCommand(shlex.split(command), env=runtime.get_env()) 274 command.start() 275 return command 276 [end of lutris/command.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lutris/command.py b/lutris/command.py --- a/lutris/command.py +++ b/lutris/command.py @@ -17,6 +17,15 @@ from lutris.util import system WRAPPER_SCRIPT = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "../share/lutris/bin/lutris-wrapper")) +if not os.path.isfile(WRAPPER_SCRIPT): + usr_path = "/usr/share/lutris/bin/lutris-wrapper" + usr_local_path = "/usr/local/share/lutris/bin/lutris-wrapper" + if os.path.isfile(usr_path): + WRAPPER_SCRIPT = usr_path + elif os.path.isfile(usr_local_path): + WRAPPER_SCRIPT = usr_local_path + else: + raise FileNotFoundError("Couldn't find lutris-wrapper script in any of the expected locations") class MonitoredCommand:
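The patch keeps the original relative lookup but falls back to fixed absolute locations when the computed path does not exist, which is exactly what breaks under Gentoo's python-exec symlink indirection. Below is a minimal sketch of that resolution order, written as a helper function rather than module-level constants; the function name is illustrative, while the candidate paths match the ones added by the patch.

```python
import os
import sys

def find_wrapper_script():
    """Return the first existing lutris-wrapper path, mirroring the patched lookup order."""
    relative = os.path.abspath(
        os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),
                     "../share/lutris/bin/lutris-wrapper")
    )
    candidates = [
        relative,                                       # normal install: script lives next to share/
        "/usr/share/lutris/bin/lutris-wrapper",         # absolute fallback (e.g. Gentoo's symlinked entry point)
        "/usr/local/share/lutris/bin/lutris-wrapper",
    ]
    for path in candidates:
        if os.path.isfile(path):
            return path
    raise FileNotFoundError("Couldn't find lutris-wrapper in any of the expected locations")

if __name__ == "__main__":
    try:
        print(find_wrapper_script())
    except FileNotFoundError as exc:
        print(exc)      # expected on a machine without Lutris installed
```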
{"golden_diff": "diff --git a/lutris/command.py b/lutris/command.py\n--- a/lutris/command.py\n+++ b/lutris/command.py\n@@ -17,6 +17,15 @@\n from lutris.util import system\n \n WRAPPER_SCRIPT = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), \"../share/lutris/bin/lutris-wrapper\"))\n+if not os.path.isfile(WRAPPER_SCRIPT):\n+ usr_path = \"/usr/share/lutris/bin/lutris-wrapper\"\n+ usr_local_path = \"/usr/local/share/lutris/bin/lutris-wrapper\"\n+ if os.path.isfile(usr_path):\n+ WRAPPER_SCRIPT = usr_path\n+ elif os.path.isfile(usr_local_path):\n+ WRAPPER_SCRIPT = usr_local_path\n+ else:\n+ raise FileNotFoundError(\"Couldn't find lutris-wrapper script in any of the expected locations\")\n \n \n class MonitoredCommand:\n", "issue": "Commit 9102071 Breaks Gentoo compatibility\nUsing a relative path breaks Gentoo compatibility.\r\n\r\nDue to Gentoo's ability to have multiple versions of python installed simutaneously, they do some symlink resolution magic that determines which python interpreter should be used for a package based on the constraining flags applied at installation.\r\n\r\nBecause of this lutris script is not located in /usr/bin/lutris. This is a symlink to the resolver that then performs its magic. This causes lutris to fail to be able to call the python-wrapper script.\r\n\r\nRecommend non-relative path to lutris-wrapper.\nCommit 9102071 Breaks Gentoo compatibility\nUsing a relative path breaks Gentoo compatibility.\r\n\r\nDue to Gentoo's ability to have multiple versions of python installed simutaneously, they do some symlink resolution magic that determines which python interpreter should be used for a package based on the constraining flags applied at installation.\r\n\r\nBecause of this lutris script is not located in /usr/bin/lutris. This is a symlink to the resolver that then performs its magic. 
This causes lutris to fail to be able to call the python-wrapper script.\r\n\r\nRecommend non-relative path to lutris-wrapper.\n", "before_files": [{"content": "\"\"\"Threading module, used to launch games while monitoring them.\"\"\"\n\nimport io\nimport os\nimport sys\nimport fcntl\nimport shlex\nimport subprocess\nimport contextlib\nfrom textwrap import dedent\n\nfrom gi.repository import GLib\n\nfrom lutris import settings\nfrom lutris import runtime\nfrom lutris.util.log import logger\nfrom lutris.util import system\n\nWRAPPER_SCRIPT = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), \"../share/lutris/bin/lutris-wrapper\"))\n\n\nclass MonitoredCommand:\n \"\"\"Exexcutes a commmand while keeping track of its state\"\"\"\n\n fallback_cwd = \"/tmp\"\n\n def __init__(\n self,\n command,\n runner=None,\n env=None,\n term=None,\n cwd=None,\n include_processes=None,\n exclude_processes=None,\n log_buffer=None,\n title=None,\n ): # pylint: disable=too-many-arguments\n self.ready_state = True\n self.env = self.get_environment(env)\n\n self.command = command\n self.runner = runner\n self.stop_func = lambda: True\n self.game_process = None\n self.prevent_on_stop = False\n self.return_code = None\n self.terminal = system.find_executable(term)\n self.is_running = True\n self.error = None\n self.log_handlers = [\n self.log_handler_stdout,\n self.log_handler_console_output,\n ]\n self.set_log_buffer(log_buffer)\n self.stdout_monitor = None\n self.include_processes = include_processes or []\n self.exclude_processes = exclude_processes or []\n\n self.cwd = self.get_cwd(cwd)\n\n self._stdout = io.StringIO()\n\n self._title = title if title else command[0]\n\n @property\n def stdout(self):\n return self._stdout.getvalue()\n\n @property\n def wrapper_command(self):\n \"\"\"Return launch arguments for the wrapper script\"\"\"\n\n return [\n WRAPPER_SCRIPT,\n self._title,\n str(len(self.include_processes)),\n str(len(self.exclude_processes)),\n ] + self.include_processes + self.exclude_processes + self.command\n\n def set_log_buffer(self, log_buffer):\n \"\"\"Attach a TextBuffer to this command enables the buffer handler\"\"\"\n if not log_buffer:\n return\n self.log_buffer = log_buffer\n if self.log_handler_buffer not in self.log_handlers:\n self.log_handlers.append(self.log_handler_buffer)\n\n def get_cwd(self, cwd):\n \"\"\"Return the current working dir of the game\"\"\"\n if not cwd:\n cwd = self.runner.working_dir if self.runner else None\n return os.path.expanduser(cwd or \"~\")\n\n @staticmethod\n def get_environment(user_env):\n \"\"\"Process the user provided environment variables for use as self.env\"\"\"\n env = user_env or {}\n # not clear why this needs to be added, the path is already added in\n # the wrappper script.\n env['PYTHONPATH'] = ':'.join(sys.path)\n # Drop bad values of environment keys, those will confuse the Python\n # interpreter.\n return {\n key: value for key, value in env.items() if \"=\" not in key\n }\n\n def get_child_environment(self):\n \"\"\"Returns the calculated environment for the child process.\"\"\"\n env = os.environ.copy()\n env.update(self.env)\n return env\n\n def start(self):\n \"\"\"Run the thread.\"\"\"\n logger.debug(\"Running %s\", \" \".join(self.wrapper_command))\n for key, value in self.env.items():\n logger.debug(\"ENV: %s=\\\"%s\\\"\", key, value)\n\n if self.terminal:\n self.game_process = self.run_in_terminal()\n else:\n env = self.get_child_environment()\n self.game_process = self.execute_process(self.wrapper_command, 
env)\n\n if not self.game_process:\n logger.warning(\"No game process available\")\n return\n\n GLib.child_watch_add(self.game_process.pid, self.on_stop)\n\n # make stdout nonblocking.\n fileno = self.game_process.stdout.fileno()\n fcntl.fcntl(\n fileno,\n fcntl.F_SETFL,\n fcntl.fcntl(fileno, fcntl.F_GETFL) | os.O_NONBLOCK\n )\n\n self.stdout_monitor = GLib.io_add_watch(\n self.game_process.stdout,\n GLib.IO_IN | GLib.IO_HUP,\n self.on_stdout_output,\n )\n\n def log_handler_stdout(self, line):\n \"\"\"Add the line to this command's stdout attribute\"\"\"\n self._stdout.write(line)\n\n def log_handler_buffer(self, line):\n \"\"\"Add the line to the associated LogBuffer object\"\"\"\n self.log_buffer.insert(self.log_buffer.get_end_iter(), line, -1)\n\n def log_handler_console_output(self, line): # pylint: disable=no-self-use\n \"\"\"Print the line to stdout\"\"\"\n with contextlib.suppress(BlockingIOError):\n sys.stdout.write(line)\n sys.stdout.flush()\n\n def on_stop(self, _pid, returncode):\n \"\"\"Callback registered on game process termination\"\"\"\n if self.prevent_on_stop: # stop() already in progress\n return False\n\n logger.debug(\"The process has terminated with code %s\", returncode)\n self.is_running = False\n self.return_code = returncode\n\n resume_stop = self.stop()\n if not resume_stop:\n logger.info(\"Full shutdown prevented\")\n return False\n\n return False\n\n def on_stdout_output(self, stdout, condition):\n \"\"\"Called by the stdout monitor to dispatch output to log handlers\"\"\"\n if condition == GLib.IO_HUP:\n self.stdout_monitor = None\n return False\n if not self.is_running:\n return False\n try:\n line = stdout.read(262144).decode(\"utf-8\", errors=\"ignore\")\n except ValueError:\n # file_desc might be closed\n return True\n if \"winemenubuilder.exe\" in line:\n return True\n for log_handler in self.log_handlers:\n log_handler(line)\n return True\n\n def run_in_terminal(self):\n \"\"\"Write command in a script file and run it.\n\n Running it from a file is likely the only way to set env vars only\n for the command (not for the terminal app).\n It's also the only reliable way to keep the term open when the\n game is quit.\n \"\"\"\n script_path = os.path.join(settings.CACHE_DIR, \"run_in_term.sh\")\n exported_environment = \"\\n\".join(\n 'export %s=\"%s\" ' % (key, value)\n for key, value in self.env.items()\n )\n command = \" \".join(['\"%s\"' % token for token in self.wrapper_command])\n with open(script_path, \"w\") as script_file:\n script_file.write(dedent(\n \"\"\"#!/bin/sh\n cd \"%s\"\n %s\n exec %s\n \"\"\" % (self.cwd, exported_environment, command)\n ))\n os.chmod(script_path, 0o744)\n return self.execute_process([self.terminal, \"-e\", script_path])\n\n def execute_process(self, command, env=None):\n \"\"\"Execute and return a subprocess\"\"\"\n if self.cwd and not system.path_exists(self.cwd):\n try:\n os.makedirs(self.cwd)\n except OSError:\n logger.error(\"Failed to create working directory, falling back to %s\",\n self.fallback_cwd)\n self.cwd = \"/tmp\"\n try:\n\n return subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n cwd=self.cwd,\n env=env,\n )\n except OSError as ex:\n logger.exception(\"Failed to execute %s: %s\", \" \".join(command), ex)\n self.error = ex.strerror\n\n def stop(self):\n \"\"\"Stops the current game process and cleans up the instance\"\"\"\n # Prevent stop() being called again by the process exiting\n self.prevent_on_stop = True\n\n try:\n self.game_process.terminate()\n except 
ProcessLookupError: # process already dead.\n logger.debug(\"Management process looks dead already.\")\n\n if hasattr(self, \"stop_func\"):\n resume_stop = self.stop_func()\n if not resume_stop:\n return False\n\n if self.stdout_monitor:\n logger.debug(\"Detaching logger\")\n GLib.source_remove(self.stdout_monitor)\n self.stdout_monitor = None\n else:\n logger.debug(\"logger already detached\")\n\n self.is_running = False\n self.ready_state = False\n return True\n\n\ndef exec_command(command):\n \"\"\"Execute arbitrary command in a MonitoredCommand\n\n Used by the --exec command line flag.\n \"\"\"\n command = MonitoredCommand(shlex.split(command), env=runtime.get_env())\n command.start()\n return command\n", "path": "lutris/command.py"}]}
3,397
200
gh_patches_debug_1803
rasdani/github-patches
git_diff
unionai-oss__pandera-909
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Implicit ignore_na=True behaviour causes custom dataframe checks to be ignored by default **Describe the bug** When using custom dataframe checks returning boolean series, `SchemaModel.validate()` does not report validation errors generated by a custom check if the record in question (or is it the entire dataframe?) contains null values. This behaviour is deafult and can be disabled by setting `ignore_na` to `False` (either via `@pa.dataframe_check(ignore_na=False)` or a Config entry such as `check_name = {"ignore_na": False}`). This default behaviour is surprising, and the root cause of my issue was discovered only after a lengthy debugging process with much help from @cosmicBboy. Perhaps this default behaviour could be changed or at least better documented, as Niels mentions that this has come up more than once previously. - [x] I have checked that this issue has not already been reported. - [x] I have confirmed this bug exists on the latest version of pandera. - [x] (optional) I have confirmed this bug exists on the master branch of pandera. #### Code Sample, a copy-pastable example CSV (DataFrame): ``` field0,field1,field2 ,foo,foo ``` ```python @pa.dataframe_check def field1_does_not_equal_field2(cls, df: pd.DataFrame) -> Series[bool]: return df["field1"] != df["field2"] # Even though field1 == field2, validation passes because field0 is null ``` #### Expected behavior Create a custom Pandera dataframe check returning a boolean series that accepts a CSV containing null values. </issue> <code> [start of pandera/check_utils.py] 1 """Utility functions for validation.""" 2 3 from functools import lru_cache 4 from typing import NamedTuple, Optional, Tuple, Union 5 6 import pandas as pd 7 8 SupportedTypes = NamedTuple( 9 "SupportedTypes", 10 ( 11 ("table_types", Tuple[type, ...]), 12 ("field_types", Tuple[type, ...]), 13 ("index_types", Tuple[type, ...]), 14 ("multiindex_types", Tuple[type, ...]), 15 ), 16 ) 17 18 19 @lru_cache(maxsize=None) 20 def _supported_types(): 21 # pylint: disable=import-outside-toplevel 22 table_types = [pd.DataFrame] 23 field_types = [pd.Series] 24 index_types = [pd.Index] 25 multiindex_types = [pd.MultiIndex] 26 27 try: 28 import pyspark.pandas as ps 29 30 table_types.append(ps.DataFrame) 31 field_types.append(ps.Series) 32 index_types.append(ps.Index) 33 multiindex_types.append(ps.MultiIndex) 34 except ImportError: 35 pass 36 try: # pragma: no cover 37 import modin.pandas as mpd 38 39 table_types.append(mpd.DataFrame) 40 field_types.append(mpd.Series) 41 index_types.append(mpd.Index) 42 multiindex_types.append(mpd.MultiIndex) 43 except ImportError: 44 pass 45 try: 46 import dask.dataframe as dd 47 48 table_types.append(dd.DataFrame) 49 field_types.append(dd.Series) 50 index_types.append(dd.Index) 51 except ImportError: 52 pass 53 54 return SupportedTypes( 55 tuple(table_types), 56 tuple(field_types), 57 tuple(index_types), 58 tuple(multiindex_types), 59 ) 60 61 62 def is_table(obj): 63 """Verifies whether an object is table-like. 64 65 Where a table is a 2-dimensional data matrix of rows and columns, which 66 can be indexed in multiple different ways. 67 """ 68 return isinstance(obj, _supported_types().table_types) 69 70 71 def is_field(obj): 72 """Verifies whether an object is field-like. 73 74 Where a field is a columnar representation of data in a table-like 75 data structure. 
76 """ 77 return isinstance(obj, _supported_types().field_types) 78 79 80 def is_index(obj): 81 """Verifies whether an object is a table index.""" 82 return isinstance(obj, _supported_types().index_types) 83 84 85 def is_multiindex(obj): 86 """Verifies whether an object is a multi-level table index.""" 87 return isinstance(obj, _supported_types().multiindex_types) 88 89 90 def is_supported_check_obj(obj): 91 """Verifies whether an object is table- or field-like.""" 92 return is_table(obj) or is_field(obj) 93 94 95 def prepare_series_check_output( 96 check_obj: Union[pd.Series, pd.DataFrame], 97 check_output: pd.Series, 98 ignore_na: bool = True, 99 n_failure_cases: Optional[int] = None, 100 ) -> Tuple[pd.Series, pd.Series]: 101 """Prepare the check output and failure cases for a Series check output. 102 103 check_obj can be a dataframe, since a check function can potentially return 104 a Series resulting from applying some check function that outputs a Series. 105 """ 106 if ignore_na: 107 isna = ( 108 check_obj.isna().any(axis="columns") 109 if isinstance(check_obj, pd.DataFrame) 110 else check_obj.isna() 111 ) 112 try: 113 check_output = check_output | isna 114 except AttributeError: 115 # convert check_output to numpy for modin compatibility 116 check_output = check_output.to_numpy() | isna 117 118 failure_cases = check_obj[~check_output] 119 if not failure_cases.empty and n_failure_cases is not None: 120 # NOTE: this is a hack to support pyspark.pandas and modin, since you 121 # can't use groupby on a dataframe with another dataframe 122 if type(failure_cases).__module__.startswith("pyspark.pandas") or type( 123 failure_cases 124 ).__module__.startswith("modin.pandas"): 125 failure_cases = ( 126 failure_cases.rename("failure_cases") 127 .to_frame() 128 .assign(check_output=check_output) 129 .groupby("check_output") 130 .head(n_failure_cases)["failure_cases"] 131 ) 132 else: 133 failure_cases = failure_cases.groupby(check_output).head( 134 n_failure_cases 135 ) 136 return check_output, failure_cases 137 138 139 def prepare_dataframe_check_output( 140 check_obj: pd.DataFrame, 141 check_output: pd.DataFrame, 142 df_orig: Optional[pd.DataFrame] = None, 143 ignore_na: bool = True, 144 n_failure_cases: Optional[int] = None, 145 ) -> Tuple[pd.Series, pd.Series]: 146 """Unstack a dataframe of boolean values. 147 148 Check results consisting of a boolean dataframe should be reported at the 149 most granular level. 150 """ 151 if df_orig is not None: 152 assert df_orig.shape == check_output.shape 153 154 if df_orig is None: 155 df_orig = check_obj 156 check_output = check_output.unstack() 157 if ignore_na: 158 check_output = check_output | df_orig.unstack().isna() 159 failure_cases = ( 160 check_obj.unstack()[~check_output] 161 .rename("failure_case") 162 .rename_axis(["column", "index"]) 163 .reset_index() 164 ) 165 if not failure_cases.empty and n_failure_cases is not None: 166 failure_cases = failure_cases.drop_duplicates().head(n_failure_cases) 167 return check_output, failure_cases 168 [end of pandera/check_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pandera/check_utils.py b/pandera/check_utils.py --- a/pandera/check_utils.py +++ b/pandera/check_utils.py @@ -105,7 +105,7 @@ """ if ignore_na: isna = ( - check_obj.isna().any(axis="columns") + check_obj.isna().all(axis="columns") if isinstance(check_obj, pd.DataFrame) else check_obj.isna() )
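The one-word change from `.any(axis="columns")` to `.all(axis="columns")` is the whole fix: with `ignore_na=True`, a row is now exempted from a dataframe-level check only when *every* column is null, not when any single column is. A small self-contained demonstration of the masking difference on the CSV from the issue follows; it reproduces only the boolean masking step, not the rest of pandera's check machinery.

```python
import pandas as pd

df = pd.DataFrame({"field0": [None], "field1": ["foo"], "field2": ["foo"]})
check_output = df["field1"] != df["field2"]               # the custom check: should fail here

# Old behaviour: any NA in the row masks the failure.
masked_any = check_output | df.isna().any(axis="columns")
# Patched behaviour: only an all-NA row is exempted.
masked_all = check_output | df.isna().all(axis="columns")

print(masked_any.tolist())   # [True]  -> the field1 == field2 violation is silently ignored
print(masked_all.tolist())   # [False] -> the violation is reported, as expected
```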
{"golden_diff": "diff --git a/pandera/check_utils.py b/pandera/check_utils.py\n--- a/pandera/check_utils.py\n+++ b/pandera/check_utils.py\n@@ -105,7 +105,7 @@\n \"\"\"\n if ignore_na:\n isna = (\n- check_obj.isna().any(axis=\"columns\")\n+ check_obj.isna().all(axis=\"columns\")\n if isinstance(check_obj, pd.DataFrame)\n else check_obj.isna()\n )\n", "issue": "Implicit ignore_na=True behaviour causes custom dataframe checks to be ignored by default\n**Describe the bug**\r\nWhen using custom dataframe checks returning boolean series, `SchemaModel.validate()` does not report validation errors generated by a custom check if the record in question (or is it the entire dataframe?) contains null values. This behaviour is deafult and can be disabled by setting `ignore_na` to `False` (either via `@pa.dataframe_check(ignore_na=False)` or a Config entry such as `check_name = {\"ignore_na\": False}`). This default behaviour is surprising, and the root cause of my issue was discovered only after a lengthy debugging process with much help from @cosmicBboy. Perhaps this default behaviour could be changed or at least better documented, as Niels mentions that this has come up more than once previously.\r\n\r\n- [x] I have checked that this issue has not already been reported.\r\n- [x] I have confirmed this bug exists on the latest version of pandera.\r\n- [x] (optional) I have confirmed this bug exists on the master branch of pandera.\r\n\r\n\r\n#### Code Sample, a copy-pastable example\r\n\r\nCSV (DataFrame):\r\n```\r\nfield0,field1,field2\r\n,foo,foo\r\n```\r\n```python\r\[email protected]_check\r\ndef field1_does_not_equal_field2(cls, df: pd.DataFrame) -> Series[bool]:\r\n return df[\"field1\"] != df[\"field2\"]\r\n\r\n# Even though field1 == field2, validation passes because field0 is null\r\n```\r\n\r\n#### Expected behavior\r\nCreate a custom Pandera dataframe check returning a boolean series that accepts a CSV containing null values.\r\n\n", "before_files": [{"content": "\"\"\"Utility functions for validation.\"\"\"\n\nfrom functools import lru_cache\nfrom typing import NamedTuple, Optional, Tuple, Union\n\nimport pandas as pd\n\nSupportedTypes = NamedTuple(\n \"SupportedTypes\",\n (\n (\"table_types\", Tuple[type, ...]),\n (\"field_types\", Tuple[type, ...]),\n (\"index_types\", Tuple[type, ...]),\n (\"multiindex_types\", Tuple[type, ...]),\n ),\n)\n\n\n@lru_cache(maxsize=None)\ndef _supported_types():\n # pylint: disable=import-outside-toplevel\n table_types = [pd.DataFrame]\n field_types = [pd.Series]\n index_types = [pd.Index]\n multiindex_types = [pd.MultiIndex]\n\n try:\n import pyspark.pandas as ps\n\n table_types.append(ps.DataFrame)\n field_types.append(ps.Series)\n index_types.append(ps.Index)\n multiindex_types.append(ps.MultiIndex)\n except ImportError:\n pass\n try: # pragma: no cover\n import modin.pandas as mpd\n\n table_types.append(mpd.DataFrame)\n field_types.append(mpd.Series)\n index_types.append(mpd.Index)\n multiindex_types.append(mpd.MultiIndex)\n except ImportError:\n pass\n try:\n import dask.dataframe as dd\n\n table_types.append(dd.DataFrame)\n field_types.append(dd.Series)\n index_types.append(dd.Index)\n except ImportError:\n pass\n\n return SupportedTypes(\n tuple(table_types),\n tuple(field_types),\n tuple(index_types),\n tuple(multiindex_types),\n )\n\n\ndef is_table(obj):\n \"\"\"Verifies whether an object is table-like.\n\n Where a table is a 2-dimensional data matrix of rows and columns, which\n can be indexed in multiple different ways.\n \"\"\"\n return 
isinstance(obj, _supported_types().table_types)\n\n\ndef is_field(obj):\n \"\"\"Verifies whether an object is field-like.\n\n Where a field is a columnar representation of data in a table-like\n data structure.\n \"\"\"\n return isinstance(obj, _supported_types().field_types)\n\n\ndef is_index(obj):\n \"\"\"Verifies whether an object is a table index.\"\"\"\n return isinstance(obj, _supported_types().index_types)\n\n\ndef is_multiindex(obj):\n \"\"\"Verifies whether an object is a multi-level table index.\"\"\"\n return isinstance(obj, _supported_types().multiindex_types)\n\n\ndef is_supported_check_obj(obj):\n \"\"\"Verifies whether an object is table- or field-like.\"\"\"\n return is_table(obj) or is_field(obj)\n\n\ndef prepare_series_check_output(\n check_obj: Union[pd.Series, pd.DataFrame],\n check_output: pd.Series,\n ignore_na: bool = True,\n n_failure_cases: Optional[int] = None,\n) -> Tuple[pd.Series, pd.Series]:\n \"\"\"Prepare the check output and failure cases for a Series check output.\n\n check_obj can be a dataframe, since a check function can potentially return\n a Series resulting from applying some check function that outputs a Series.\n \"\"\"\n if ignore_na:\n isna = (\n check_obj.isna().any(axis=\"columns\")\n if isinstance(check_obj, pd.DataFrame)\n else check_obj.isna()\n )\n try:\n check_output = check_output | isna\n except AttributeError:\n # convert check_output to numpy for modin compatibility\n check_output = check_output.to_numpy() | isna\n\n failure_cases = check_obj[~check_output]\n if not failure_cases.empty and n_failure_cases is not None:\n # NOTE: this is a hack to support pyspark.pandas and modin, since you\n # can't use groupby on a dataframe with another dataframe\n if type(failure_cases).__module__.startswith(\"pyspark.pandas\") or type(\n failure_cases\n ).__module__.startswith(\"modin.pandas\"):\n failure_cases = (\n failure_cases.rename(\"failure_cases\")\n .to_frame()\n .assign(check_output=check_output)\n .groupby(\"check_output\")\n .head(n_failure_cases)[\"failure_cases\"]\n )\n else:\n failure_cases = failure_cases.groupby(check_output).head(\n n_failure_cases\n )\n return check_output, failure_cases\n\n\ndef prepare_dataframe_check_output(\n check_obj: pd.DataFrame,\n check_output: pd.DataFrame,\n df_orig: Optional[pd.DataFrame] = None,\n ignore_na: bool = True,\n n_failure_cases: Optional[int] = None,\n) -> Tuple[pd.Series, pd.Series]:\n \"\"\"Unstack a dataframe of boolean values.\n\n Check results consisting of a boolean dataframe should be reported at the\n most granular level.\n \"\"\"\n if df_orig is not None:\n assert df_orig.shape == check_output.shape\n\n if df_orig is None:\n df_orig = check_obj\n check_output = check_output.unstack()\n if ignore_na:\n check_output = check_output | df_orig.unstack().isna()\n failure_cases = (\n check_obj.unstack()[~check_output]\n .rename(\"failure_case\")\n .rename_axis([\"column\", \"index\"])\n .reset_index()\n )\n if not failure_cases.empty and n_failure_cases is not None:\n failure_cases = failure_cases.drop_duplicates().head(n_failure_cases)\n return check_output, failure_cases\n", "path": "pandera/check_utils.py"}]}
2,414
104
gh_patches_debug_17584
rasdani/github-patches
git_diff
geopandas__geopandas-379
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ENH: preserve column order in read_file Idea from http://gis.stackexchange.com/questions/216963/preserve-column-order-of-geopandas-file-read/217084#217084 For shapefiles, fiona saves the properties in the `meta` attribute as an OrderedDict, so we should be able to get the actual order of the columns in the file from that and rearrange the columns of the output accordingly. </issue> <code> [start of geopandas/io/file.py] 1 import os 2 3 import fiona 4 import numpy as np 5 from shapely.geometry import mapping 6 7 from six import iteritems 8 from geopandas import GeoDataFrame 9 10 11 def read_file(filename, **kwargs): 12 """ 13 Returns a GeoDataFrame from a file. 14 15 *filename* is either the absolute or relative path to the file to be 16 opened and *kwargs* are keyword args to be passed to the `open` method 17 in the fiona library when opening the file. For more information on 18 possible keywords, type: ``import fiona; help(fiona.open)`` 19 """ 20 bbox = kwargs.pop('bbox', None) 21 with fiona.open(filename, **kwargs) as f: 22 crs = f.crs 23 if bbox is not None: 24 assert len(bbox)==4 25 f_filt = f.filter(bbox=bbox) 26 else: 27 f_filt = f 28 gdf = GeoDataFrame.from_features(f_filt, crs=crs) 29 30 return gdf 31 32 33 def to_file(df, filename, driver="ESRI Shapefile", schema=None, 34 **kwargs): 35 """ 36 Write this GeoDataFrame to an OGR data source 37 38 A dictionary of supported OGR providers is available via: 39 >>> import fiona 40 >>> fiona.supported_drivers 41 42 Parameters 43 ---------- 44 df : GeoDataFrame to be written 45 filename : string 46 File path or file handle to write to. 47 driver : string, default 'ESRI Shapefile' 48 The OGR format driver used to write the vector file. 49 schema : dict, default None 50 If specified, the schema dictionary is passed to Fiona to 51 better control how the file is written. If None, GeoPandas 52 will determine the schema based on each column's dtype 53 54 The *kwargs* are passed to fiona.open and can be used to write 55 to multi-layer data, store data within archives (zip files), etc. 56 """ 57 if schema is None: 58 schema = infer_schema(df) 59 filename = os.path.abspath(os.path.expanduser(filename)) 60 with fiona.open(filename, 'w', driver=driver, crs=df.crs, 61 schema=schema, **kwargs) as c: 62 for feature in df.iterfeatures(): 63 c.write(feature) 64 65 66 def infer_schema(df): 67 try: 68 from collections import OrderedDict 69 except ImportError: 70 from ordereddict import OrderedDict 71 72 def convert_type(in_type): 73 if in_type == object: 74 return 'str' 75 out_type = type(np.asscalar(np.zeros(1, in_type))).__name__ 76 if out_type == 'long': 77 out_type = 'int' 78 return out_type 79 80 properties = OrderedDict([ 81 (col, convert_type(_type)) for col, _type in 82 zip(df.columns, df.dtypes) if col != df._geometry_column_name 83 ]) 84 85 geom_type = _common_geom_type(df) 86 if not geom_type: 87 raise ValueError("Geometry column cannot contain mutiple " 88 "geometry types when writing to file.") 89 90 schema = {'geometry': geom_type, 'properties': properties} 91 92 return schema 93 94 95 def _common_geom_type(df): 96 # Need to check geom_types before we write to file... 97 # Some (most?) 
providers expect a single geometry type: 98 # Point, LineString, or Polygon 99 geom_types = df.geometry.geom_type.unique() 100 101 from os.path import commonprefix # To find longest common prefix 102 geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse 103 if not geom_type: 104 geom_type = None 105 106 return geom_type 107 [end of geopandas/io/file.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/geopandas/io/file.py b/geopandas/io/file.py --- a/geopandas/io/file.py +++ b/geopandas/io/file.py @@ -14,7 +14,7 @@ *filename* is either the absolute or relative path to the file to be opened and *kwargs* are keyword args to be passed to the `open` method - in the fiona library when opening the file. For more information on + in the fiona library when opening the file. For more information on possible keywords, type: ``import fiona; help(fiona.open)`` """ bbox = kwargs.pop('bbox', None) @@ -27,6 +27,10 @@ f_filt = f gdf = GeoDataFrame.from_features(f_filt, crs=crs) + # re-order with column order from metadata, with geometry last + columns = list(f.meta["schema"]["properties"]) + ["geometry"] + gdf = gdf[columns] + return gdf
{"golden_diff": "diff --git a/geopandas/io/file.py b/geopandas/io/file.py\n--- a/geopandas/io/file.py\n+++ b/geopandas/io/file.py\n@@ -14,7 +14,7 @@\n \n *filename* is either the absolute or relative path to the file to be\n opened and *kwargs* are keyword args to be passed to the `open` method\n- in the fiona library when opening the file. For more information on \n+ in the fiona library when opening the file. For more information on\n possible keywords, type: ``import fiona; help(fiona.open)``\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n@@ -27,6 +27,10 @@\n f_filt = f\n gdf = GeoDataFrame.from_features(f_filt, crs=crs)\n \n+ # re-order with column order from metadata, with geometry last\n+ columns = list(f.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n+ gdf = gdf[columns]\n+\n return gdf\n", "issue": "ENH: preserve column order in read_file\nIdea from http://gis.stackexchange.com/questions/216963/preserve-column-order-of-geopandas-file-read/217084#217084\r\n\r\nFor shapefiles, fiona saves the properties in the `meta` attribute as an OrderedDict, so we should be able to get the actual order of the columns in the file from that and rearrange the columns of the output accordingly.\n", "before_files": [{"content": "import os\n\nimport fiona\nimport numpy as np\nfrom shapely.geometry import mapping\n\nfrom six import iteritems\nfrom geopandas import GeoDataFrame\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file.\n\n *filename* is either the absolute or relative path to the file to be\n opened and *kwargs* are keyword args to be passed to the `open` method\n in the fiona library when opening the file. For more information on \n possible keywords, type: ``import fiona; help(fiona.open)``\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n with fiona.open(filename, **kwargs) as f:\n crs = f.crs\n if bbox is not None:\n assert len(bbox)==4\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n gdf = GeoDataFrame.from_features(f_filt, crs=crs)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. 
If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as c:\n for feature in df.iterfeatures():\n c.write(feature)\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(_type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n geom_type = _common_geom_type(df)\n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse\n if not geom_type:\n geom_type = None\n\n return geom_type\n", "path": "geopandas/io/file.py"}]}
1,642
232
gh_patches_debug_949
rasdani/github-patches
git_diff
scikit-hep__pyhf-457
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Determine if papermill v1.0 API change is a problem # Description The [papermill `v1.0` release will introduce API breaking changes](https://github.com/nteract/papermill/blob/d554193bc458797b63af1f94964883d5dcca2418/README.md). It would be good to determine if these changes will matter for pyhf testing and require the addition of [scrapbook](https://nteract-scrapbook.readthedocs.io/en/latest/) or if the API change doesn't affect pyhf. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 3 from setuptools import setup, find_packages 4 from os import path 5 import sys 6 7 this_directory = path.abspath(path.dirname(__file__)) 8 if sys.version_info.major < 3: 9 from io import open 10 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md: 11 long_description = readme_md.read() 12 13 extras_require = { 14 'tensorflow': [ 15 'tensorflow~=1.13', 16 'tensorflow-probability~=0.5', 17 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass 18 'setuptools<=39.1.0', 19 ], 20 'torch': ['torch~=1.0'], 21 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'], 22 # 'dask': [ 23 # 'dask[array]' 24 # ], 25 'xmlio': ['uproot'], 26 'minuit': ['iminuit'], 27 'develop': [ 28 'pyflakes', 29 'pytest~=3.5', 30 'pytest-cov>=2.5.1', 31 'pytest-mock', 32 'pytest-benchmark[histogram]', 33 'pytest-console-scripts', 34 'python-coveralls', 35 'coverage>=4.0', # coveralls 36 'matplotlib', 37 'jupyter', 38 'nbdime', 39 'uproot~=3.3', 40 'papermill~=0.16', 41 'graphviz', 42 'bumpversion', 43 'sphinx', 44 'sphinxcontrib-bibtex', 45 'sphinxcontrib-napoleon', 46 'sphinx_rtd_theme', 47 'nbsphinx', 48 'sphinx-issues', 49 'm2r', 50 'jsonpatch', 51 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now 52 'pre-commit', 53 'black;python_version>="3.6"', # Black is Python3 only 54 'twine', 55 ], 56 } 57 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 58 59 60 def _is_test_pypi(): 61 """ 62 Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and 63 set to true (c.f. .travis.yml) 64 65 The use_scm_version kwarg accepts a callable for the local_scheme 66 configuration parameter with argument "version". This can be replaced 67 with a lambda as the desired version structure is {next_version}.dev{distance} 68 c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy 69 70 As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version 71 controlled through bumpversion is used. 
72 """ 73 from os import getenv 74 75 return ( 76 {'local_scheme': lambda version: ''} 77 if getenv('TESTPYPI_UPLOAD') == 'true' 78 else False 79 ) 80 81 82 setup( 83 name='pyhf', 84 version='0.1.0', 85 description='(partial) pure python histfactory implementation', 86 long_description=long_description, 87 long_description_content_type='text/markdown', 88 url='https://github.com/diana-hep/pyhf', 89 author='Lukas Heinrich', 90 author_email='[email protected]', 91 license='Apache', 92 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask', 93 classifiers=[ 94 "Programming Language :: Python :: 2", 95 "Programming Language :: Python :: 2.7", 96 "Programming Language :: Python :: 3", 97 "Programming Language :: Python :: 3.6", 98 "Programming Language :: Python :: 3.7", 99 ], 100 packages=find_packages(), 101 include_package_data=True, 102 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*", 103 install_requires=[ 104 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet 105 'click>=6.0', # for console scripts, 106 'tqdm', # for readxml 107 'six', # for modifiers 108 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6 109 'jsonpatch', 110 ], 111 extras_require=extras_require, 112 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']}, 113 dependency_links=[], 114 use_scm_version=_is_test_pypi(), 115 ) 116 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -37,7 +37,8 @@ 'jupyter', 'nbdime', 'uproot~=3.3', - 'papermill~=0.16', + 'papermill~=1.0', + 'nteract-scrapbook~=0.2', 'graphviz', 'bumpversion', 'sphinx',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,7 +37,8 @@\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n- 'papermill~=0.16',\n+ 'papermill~=1.0',\n+ 'nteract-scrapbook~=0.2',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n", "issue": "Determine if papermill v1.0 API change is a problem\n# Description\r\n\r\nThe [papermill `v1.0` release will introduce API breaking changes](https://github.com/nteract/papermill/blob/d554193bc458797b63af1f94964883d5dcca2418/README.md). It would be good to determine if these changes will matter for pyhf testing and require the addition of [scrapbook](https://nteract-scrapbook.readthedocs.io/en/latest/) or if the API change doesn't affect pyhf.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow~=1.13',\n 'tensorflow-probability~=0.5',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch~=1.0'],\n 'mxnet': ['mxnet~=1.0', 'requests~=2.18.4', 'numpy<1.15.0,>=1.8.2'],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=0.16',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\ndef _is_test_pypi():\n \"\"\"\n Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n set to true (c.f. .travis.yml)\n\n The use_scm_version kwarg accepts a callable for the local_scheme\n configuration parameter with argument \"version\". This can be replaced\n with a lambda as the desired version structure is {next_version}.dev{distance}\n c.f. 
https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n\n As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n controlled through bumpversion is used.\n \"\"\"\n from os import getenv\n\n return (\n {'local_scheme': lambda version: ''}\n if getenv('TESTPYPI_UPLOAD') == 'true'\n else False\n )\n\n\nsetup(\n name='pyhf',\n version='0.1.0',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n use_scm_version=_is_test_pypi(),\n)\n", "path": "setup.py"}]}
1,945
103
gh_patches_debug_4048
rasdani/github-patches
git_diff
lutris__lutris-1136
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> "Manage Versions` window is too narrow Recent improvements to Wine version managing are currently a bit pointless due to Manage Versions windows being too narrow and not being able to display the status of the downloading process: ![peek 2018-09-29 11-37](https://user-images.githubusercontent.com/10602045/46243364-21144c80-c3dc-11e8-86b0-5356d707c7d7.gif) </issue> <code> [start of lutris/gui/runnerinstalldialog.py] 1 # pylint: disable=missing-docstring 2 import os 3 import random 4 5 from gi.repository import GLib, GObject, Gtk 6 from lutris import api, settings 7 from lutris.gui.dialogs import ErrorDialog, QuestionDialog 8 from lutris.gui.widgets.dialogs import Dialog 9 from lutris.util import jobs, system 10 from lutris.util.downloader import Downloader 11 from lutris.util.extract import extract_archive 12 from lutris.util.log import logger 13 14 15 class RunnerInstallDialog(Dialog): 16 COL_VER = 0 17 COL_ARCH = 1 18 COL_URL = 2 19 COL_INSTALLED = 3 20 COL_PROGRESS = 4 21 22 def __init__(self, title, parent, runner): 23 super(RunnerInstallDialog, self).__init__( 24 title, parent, 0, ('_OK', Gtk.ResponseType.OK) 25 ) 26 width, height = (340, 380) 27 self.dialog_size = (width, height) 28 self.set_default_size(width, height) 29 30 self.runner = runner 31 self.runner_info = api.get_runners(self.runner) 32 if not self.runner_info: 33 ErrorDialog('Unable to get runner versions, check your internet connection', 34 parent=parent) 35 return 36 label = Gtk.Label("%s version management" % self.runner_info['name']) 37 self.vbox.add(label) 38 self.runner_store = self.get_store() 39 scrolled_window = Gtk.ScrolledWindow() 40 self.treeview = self.get_treeview(self.runner_store) 41 self.installing = {} 42 self.connect('response', self.on_response) 43 44 scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, 45 Gtk.PolicyType.AUTOMATIC) 46 scrolled_window.set_shadow_type(Gtk.ShadowType.ETCHED_OUT) 47 scrolled_window.add(self.treeview) 48 49 self.vbox.pack_start(scrolled_window, True, True, 14) 50 self.show_all() 51 52 def get_treeview(self, model): 53 treeview = Gtk.TreeView(model=model) 54 treeview.set_headers_visible(False) 55 56 renderer_toggle = Gtk.CellRendererToggle() 57 renderer_text = Gtk.CellRendererText() 58 self.renderer_progress = Gtk.CellRendererProgress() 59 60 installed_column = Gtk.TreeViewColumn(None, renderer_toggle, active=3) 61 renderer_toggle.connect("toggled", self.on_installed_toggled) 62 treeview.append_column(installed_column) 63 64 version_column = Gtk.TreeViewColumn(None, renderer_text) 65 version_column.add_attribute(renderer_text, 'text', self.COL_VER) 66 version_column.set_property('min-width', 80) 67 treeview.append_column(version_column) 68 69 arch_column = Gtk.TreeViewColumn(None, renderer_text, 70 text=self.COL_ARCH) 71 arch_column.set_property('min-width', 50) 72 treeview.append_column(arch_column) 73 74 progress_column = Gtk.TreeViewColumn(None, self.renderer_progress, 75 value=self.COL_PROGRESS, 76 visible=self.COL_PROGRESS) 77 progress_column.set_property('fixed-width', 60) 78 progress_column.set_property('min-width', 60) 79 progress_column.set_property('resizable', False) 80 treeview.append_column(progress_column) 81 82 return treeview 83 84 def get_store(self): 85 liststore = Gtk.ListStore(str, str, str, bool, int) 86 for version_info in reversed(self.get_versions()): 87 version = version_info['version'] 88 architecture = version_info['architecture'] 89 
progress = 0 90 is_installed = os.path.exists( 91 self.get_runner_path(version, architecture) 92 ) 93 liststore.append( 94 [version_info['version'], 95 version_info['architecture'], 96 version_info['url'], 97 is_installed, 98 progress] 99 ) 100 return liststore 101 102 def get_versions(self): 103 return self.runner_info['versions'] 104 105 def get_runner_path(self, version, arch): 106 return os.path.join(settings.RUNNER_DIR, self.runner, 107 "{}-{}".format(version, arch)) 108 109 def get_dest_path(self, row): 110 url = row[2] 111 filename = os.path.basename(url) 112 return os.path.join(settings.CACHE_DIR, filename) 113 114 def on_installed_toggled(self, widget, path): 115 row = self.runner_store[path] 116 if row[self.COL_VER] in self.installing: 117 confirm_dlg = QuestionDialog({ 118 "question": "Do you want to cancel the download?", 119 "title": "Download starting" 120 }) 121 if confirm_dlg.result == confirm_dlg.YES: 122 self.cancel_install(row) 123 elif row[self.COL_INSTALLED]: 124 self.uninstall_runner(row) 125 else: 126 self.install_runner(row) 127 128 def cancel_install(self, row): 129 self.installing[row[self.COL_VER]].cancel() 130 self.uninstall_runner(row) 131 row[self.COL_PROGRESS] = 0 132 self.installing.pop(row[self.COL_VER]) 133 134 def uninstall_runner(self, row): 135 version = row[self.COL_VER] 136 arch = row[self.COL_ARCH] 137 system.remove_folder(self.get_runner_path(version, arch)) 138 row[self.COL_INSTALLED] = False 139 140 def install_runner(self, row): 141 url = row[2] 142 logger.debug("Downloading %s", url) 143 dest_path = self.get_dest_path(row) 144 downloader = Downloader(url, dest_path, overwrite=True) 145 GLib.timeout_add(100, self.get_progress, downloader, row) 146 self.installing[row[self.COL_VER]] = downloader 147 downloader.start() 148 149 def get_progress(self, downloader, row): 150 if downloader.state == downloader.CANCELLED: 151 return False 152 if downloader.state == downloader.ERROR: 153 self.cancel_install(row) 154 return False 155 downloader.check_progress() 156 percent_downloaded = downloader.progress_percentage 157 if percent_downloaded >= 1: 158 row[4] = percent_downloaded 159 self.renderer_progress.props.pulse = -1 160 self.renderer_progress.props.text = "%d %%" % int(percent_downloaded) 161 else: 162 row[4] = 1 163 self.renderer_progress.props.pulse = random.randint(1, 100) 164 self.renderer_progress.props.text = "Downloading…" 165 if downloader.state == downloader.COMPLETED: 166 row[4] = 99 167 self.renderer_progress.props.text = "Extracting…" 168 self.on_runner_downloaded(row) 169 return False 170 return True 171 172 def on_runner_downloaded(self, row): 173 version = row[0] 174 architecture = row[1] 175 src = self.get_dest_path(row) 176 dst = self.get_runner_path(version, architecture) 177 jobs.AsyncCall(self.extract, self.on_extracted, src, dst, row) 178 179 def extract(self, src, dst, row): 180 extract_archive(src, dst) 181 return src, row 182 183 def on_extracted(self, xxx_todo_changeme, error): 184 (src, row) = xxx_todo_changeme 185 os.remove(src) 186 row[self.COL_PROGRESS] = 0 187 row[self.COL_INSTALLED] = True 188 self.renderer_progress.props.text = "" 189 self.installing.pop(row[self.COL_VER]) 190 191 def on_response(self, dialog, response): 192 self.destroy() 193 194 195 if __name__ == "__main__": 196 import signal 197 signal.signal(signal.SIGINT, signal.SIG_DFL) 198 RunnerInstallDialog("test", None, "wine") 199 GObject.threads_init() 200 Gtk.main() 201 [end of lutris/gui/runnerinstalldialog.py] </code> I need you to solve this issue by 
generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lutris/gui/runnerinstalldialog.py b/lutris/gui/runnerinstalldialog.py --- a/lutris/gui/runnerinstalldialog.py +++ b/lutris/gui/runnerinstalldialog.py @@ -23,7 +23,7 @@ super(RunnerInstallDialog, self).__init__( title, parent, 0, ('_OK', Gtk.ResponseType.OK) ) - width, height = (340, 380) + width, height = (460, 380) self.dialog_size = (width, height) self.set_default_size(width, height)
{"golden_diff": "diff --git a/lutris/gui/runnerinstalldialog.py b/lutris/gui/runnerinstalldialog.py\n--- a/lutris/gui/runnerinstalldialog.py\n+++ b/lutris/gui/runnerinstalldialog.py\n@@ -23,7 +23,7 @@\n super(RunnerInstallDialog, self).__init__(\n title, parent, 0, ('_OK', Gtk.ResponseType.OK)\n )\n- width, height = (340, 380)\n+ width, height = (460, 380)\n self.dialog_size = (width, height)\n self.set_default_size(width, height)\n", "issue": "\"Manage Versions` window is too narrow\nRecent improvements to Wine version managing are currently a bit pointless due to Manage Versions windows being too narrow and not being able to display the status of the downloading process:\r\n![peek 2018-09-29 11-37](https://user-images.githubusercontent.com/10602045/46243364-21144c80-c3dc-11e8-86b0-5356d707c7d7.gif)\r\n\r\n\n", "before_files": [{"content": "# pylint: disable=missing-docstring\nimport os\nimport random\n\nfrom gi.repository import GLib, GObject, Gtk\nfrom lutris import api, settings\nfrom lutris.gui.dialogs import ErrorDialog, QuestionDialog\nfrom lutris.gui.widgets.dialogs import Dialog\nfrom lutris.util import jobs, system\nfrom lutris.util.downloader import Downloader\nfrom lutris.util.extract import extract_archive\nfrom lutris.util.log import logger\n\n\nclass RunnerInstallDialog(Dialog):\n COL_VER = 0\n COL_ARCH = 1\n COL_URL = 2\n COL_INSTALLED = 3\n COL_PROGRESS = 4\n\n def __init__(self, title, parent, runner):\n super(RunnerInstallDialog, self).__init__(\n title, parent, 0, ('_OK', Gtk.ResponseType.OK)\n )\n width, height = (340, 380)\n self.dialog_size = (width, height)\n self.set_default_size(width, height)\n\n self.runner = runner\n self.runner_info = api.get_runners(self.runner)\n if not self.runner_info:\n ErrorDialog('Unable to get runner versions, check your internet connection',\n parent=parent)\n return\n label = Gtk.Label(\"%s version management\" % self.runner_info['name'])\n self.vbox.add(label)\n self.runner_store = self.get_store()\n scrolled_window = Gtk.ScrolledWindow()\n self.treeview = self.get_treeview(self.runner_store)\n self.installing = {}\n self.connect('response', self.on_response)\n\n scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC,\n Gtk.PolicyType.AUTOMATIC)\n scrolled_window.set_shadow_type(Gtk.ShadowType.ETCHED_OUT)\n scrolled_window.add(self.treeview)\n\n self.vbox.pack_start(scrolled_window, True, True, 14)\n self.show_all()\n\n def get_treeview(self, model):\n treeview = Gtk.TreeView(model=model)\n treeview.set_headers_visible(False)\n\n renderer_toggle = Gtk.CellRendererToggle()\n renderer_text = Gtk.CellRendererText()\n self.renderer_progress = Gtk.CellRendererProgress()\n\n installed_column = Gtk.TreeViewColumn(None, renderer_toggle, active=3)\n renderer_toggle.connect(\"toggled\", self.on_installed_toggled)\n treeview.append_column(installed_column)\n\n version_column = Gtk.TreeViewColumn(None, renderer_text)\n version_column.add_attribute(renderer_text, 'text', self.COL_VER)\n version_column.set_property('min-width', 80)\n treeview.append_column(version_column)\n\n arch_column = Gtk.TreeViewColumn(None, renderer_text,\n text=self.COL_ARCH)\n arch_column.set_property('min-width', 50)\n treeview.append_column(arch_column)\n\n progress_column = Gtk.TreeViewColumn(None, self.renderer_progress,\n value=self.COL_PROGRESS,\n visible=self.COL_PROGRESS)\n progress_column.set_property('fixed-width', 60)\n progress_column.set_property('min-width', 60)\n progress_column.set_property('resizable', False)\n treeview.append_column(progress_column)\n\n 
return treeview\n\n def get_store(self):\n liststore = Gtk.ListStore(str, str, str, bool, int)\n for version_info in reversed(self.get_versions()):\n version = version_info['version']\n architecture = version_info['architecture']\n progress = 0\n is_installed = os.path.exists(\n self.get_runner_path(version, architecture)\n )\n liststore.append(\n [version_info['version'],\n version_info['architecture'],\n version_info['url'],\n is_installed,\n progress]\n )\n return liststore\n\n def get_versions(self):\n return self.runner_info['versions']\n\n def get_runner_path(self, version, arch):\n return os.path.join(settings.RUNNER_DIR, self.runner,\n \"{}-{}\".format(version, arch))\n\n def get_dest_path(self, row):\n url = row[2]\n filename = os.path.basename(url)\n return os.path.join(settings.CACHE_DIR, filename)\n\n def on_installed_toggled(self, widget, path):\n row = self.runner_store[path]\n if row[self.COL_VER] in self.installing:\n confirm_dlg = QuestionDialog({\n \"question\": \"Do you want to cancel the download?\",\n \"title\": \"Download starting\"\n })\n if confirm_dlg.result == confirm_dlg.YES:\n self.cancel_install(row)\n elif row[self.COL_INSTALLED]:\n self.uninstall_runner(row)\n else:\n self.install_runner(row)\n\n def cancel_install(self, row):\n self.installing[row[self.COL_VER]].cancel()\n self.uninstall_runner(row)\n row[self.COL_PROGRESS] = 0\n self.installing.pop(row[self.COL_VER])\n\n def uninstall_runner(self, row):\n version = row[self.COL_VER]\n arch = row[self.COL_ARCH]\n system.remove_folder(self.get_runner_path(version, arch))\n row[self.COL_INSTALLED] = False\n\n def install_runner(self, row):\n url = row[2]\n logger.debug(\"Downloading %s\", url)\n dest_path = self.get_dest_path(row)\n downloader = Downloader(url, dest_path, overwrite=True)\n GLib.timeout_add(100, self.get_progress, downloader, row)\n self.installing[row[self.COL_VER]] = downloader\n downloader.start()\n\n def get_progress(self, downloader, row):\n if downloader.state == downloader.CANCELLED:\n return False\n if downloader.state == downloader.ERROR:\n self.cancel_install(row)\n return False\n downloader.check_progress()\n percent_downloaded = downloader.progress_percentage\n if percent_downloaded >= 1:\n row[4] = percent_downloaded\n self.renderer_progress.props.pulse = -1\n self.renderer_progress.props.text = \"%d %%\" % int(percent_downloaded)\n else:\n row[4] = 1\n self.renderer_progress.props.pulse = random.randint(1, 100)\n self.renderer_progress.props.text = \"Downloading\u2026\"\n if downloader.state == downloader.COMPLETED:\n row[4] = 99\n self.renderer_progress.props.text = \"Extracting\u2026\"\n self.on_runner_downloaded(row)\n return False\n return True\n\n def on_runner_downloaded(self, row):\n version = row[0]\n architecture = row[1]\n src = self.get_dest_path(row)\n dst = self.get_runner_path(version, architecture)\n jobs.AsyncCall(self.extract, self.on_extracted, src, dst, row)\n\n def extract(self, src, dst, row):\n extract_archive(src, dst)\n return src, row\n\n def on_extracted(self, xxx_todo_changeme, error):\n (src, row) = xxx_todo_changeme\n os.remove(src)\n row[self.COL_PROGRESS] = 0\n row[self.COL_INSTALLED] = True\n self.renderer_progress.props.text = \"\"\n self.installing.pop(row[self.COL_VER])\n\n def on_response(self, dialog, response):\n self.destroy()\n\n\nif __name__ == \"__main__\":\n import signal\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n RunnerInstallDialog(\"test\", None, \"wine\")\n GObject.threads_init()\n Gtk.main()\n", "path": 
"lutris/gui/runnerinstalldialog.py"}]}
2,735
149
gh_patches_debug_37740
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-6719
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The mitmproxy program failed to start because the default port 8080 was occupied. #### Problem Description Because the default port 8080 is occupied, the mitmproxy program fails to start, and there is no output reason for the failure. #### Steps to reproduce the behavior: 1. Listen on port 8080 using the nc command in a terminal window. 2. Start the mitmproxy program in another terminal window. 3. The mitmproxy program failed to start, and there was no output reason for the failure, and the normal terminal configuration was not restored. ![image](https://github.com/mitmproxy/mitmproxy/assets/32811372/e8333100-fceb-4fdb-b781-829a9ce89945) #### System Information Mitmproxy: 10.2.3 binary Python: 3.12.2 OpenSSL: OpenSSL 3.2.1 30 Jan 2024 Platform: macOS-14.2.1-arm64-arm-64bit </issue> <code> [start of mitmproxy/master.py] 1 import asyncio 2 import logging 3 4 from . import ctx as mitmproxy_ctx 5 from .addons import termlog 6 from .proxy.mode_specs import ReverseMode 7 from .utils import asyncio_utils 8 from mitmproxy import addonmanager 9 from mitmproxy import command 10 from mitmproxy import eventsequence 11 from mitmproxy import hooks 12 from mitmproxy import http 13 from mitmproxy import log 14 from mitmproxy import options 15 16 logger = logging.getLogger(__name__) 17 18 19 class Master: 20 """ 21 The master handles mitmproxy's main event loop. 22 """ 23 24 event_loop: asyncio.AbstractEventLoop 25 _termlog_addon: termlog.TermLog | None = None 26 27 def __init__( 28 self, 29 opts: options.Options, 30 event_loop: asyncio.AbstractEventLoop | None = None, 31 with_termlog: bool = False, 32 ): 33 self.options: options.Options = opts or options.Options() 34 self.commands = command.CommandManager(self) 35 self.addons = addonmanager.AddonManager(self) 36 37 if with_termlog: 38 self._termlog_addon = termlog.TermLog() 39 self.addons.add(self._termlog_addon) 40 41 self.log = log.Log(self) # deprecated, do not use. 42 self._legacy_log_events = log.LegacyLogEvents(self) 43 self._legacy_log_events.install() 44 45 # We expect an active event loop here already because some addons 46 # may want to spawn tasks during the initial configuration phase, 47 # which happens before run(). 48 self.event_loop = event_loop or asyncio.get_running_loop() 49 self.should_exit = asyncio.Event() 50 mitmproxy_ctx.master = self 51 mitmproxy_ctx.log = self.log # deprecated, do not use. 52 mitmproxy_ctx.options = self.options 53 54 async def run(self) -> None: 55 with ( 56 asyncio_utils.install_exception_handler(self._asyncio_exception_handler), 57 asyncio_utils.set_eager_task_factory(), 58 ): 59 self.should_exit.clear() 60 61 if ec := self.addons.get("errorcheck"): 62 await ec.shutdown_if_errored() 63 if ps := self.addons.get("proxyserver"): 64 # This may block for some proxy modes, so we also monitor should_exit. 65 await asyncio.wait( 66 [ 67 asyncio.create_task(ps.setup_servers()), 68 asyncio.create_task(self.should_exit.wait()), 69 ], 70 return_when=asyncio.FIRST_COMPLETED, 71 ) 72 await self.running() 73 if ec := self.addons.get("errorcheck"): 74 await ec.shutdown_if_errored() 75 ec.finish() 76 try: 77 await self.should_exit.wait() 78 finally: 79 # .wait might be cancelled (e.g. by sys.exit) 80 await self.done() 81 82 def shutdown(self): 83 """ 84 Shut down the proxy. This method is thread-safe. 85 """ 86 # We may add an exception argument here. 
87 self.event_loop.call_soon_threadsafe(self.should_exit.set) 88 89 async def running(self) -> None: 90 await self.addons.trigger_event(hooks.RunningHook()) 91 92 async def done(self) -> None: 93 await self.addons.trigger_event(hooks.DoneHook()) 94 self._legacy_log_events.uninstall() 95 if self._termlog_addon is not None: 96 self._termlog_addon.uninstall() 97 98 def _asyncio_exception_handler(self, loop, context) -> None: 99 try: 100 exc: Exception = context["exception"] 101 except KeyError: 102 logger.error(f"Unhandled asyncio error: {context}") 103 else: 104 if isinstance(exc, OSError) and exc.errno == 10038: 105 return # suppress https://bugs.python.org/issue43253 106 logger.error( 107 "Unhandled error in task.", 108 exc_info=(type(exc), exc, exc.__traceback__), 109 ) 110 111 async def load_flow(self, f): 112 """ 113 Loads a flow 114 """ 115 116 if ( 117 isinstance(f, http.HTTPFlow) 118 and len(self.options.mode) == 1 119 and self.options.mode[0].startswith("reverse:") 120 ): 121 # When we load flows in reverse proxy mode, we adjust the target host to 122 # the reverse proxy destination for all flows we load. This makes it very 123 # easy to replay saved flows against a different host. 124 # We may change this in the future so that clientplayback always replays to the first mode. 125 mode = ReverseMode.parse(self.options.mode[0]) 126 assert isinstance(mode, ReverseMode) 127 f.request.host, f.request.port, *_ = mode.address 128 f.request.scheme = mode.scheme 129 130 for e in eventsequence.iterate(f): 131 await self.addons.handle_lifecycle(e) 132 [end of mitmproxy/master.py] [start of mitmproxy/addons/errorcheck.py] 1 import asyncio 2 import logging 3 import sys 4 5 from mitmproxy import log 6 7 8 class ErrorCheck: 9 """Monitor startup for error log entries, and terminate immediately if there are some.""" 10 11 repeat_errors_on_stderr: bool 12 """ 13 Repeat all errors on stderr before exiting. 14 This is useful for the console UI, which otherwise swallows all output. 15 """ 16 17 def __init__(self, repeat_errors_on_stderr: bool = False) -> None: 18 self.repeat_errors_on_stderr = repeat_errors_on_stderr 19 20 self.logger = ErrorCheckHandler() 21 self.logger.install() 22 23 def finish(self): 24 self.logger.uninstall() 25 26 async def shutdown_if_errored(self): 27 # don't run immediately, wait for all logging tasks to finish. 28 await asyncio.sleep(0) 29 if self.logger.has_errored: 30 plural = "s" if len(self.logger.has_errored) > 1 else "" 31 if self.repeat_errors_on_stderr: 32 msg = "\n".join(self.logger.format(r) for r in self.logger.has_errored) 33 print(f"Error{plural} logged during startup:\n{msg}", file=sys.stderr) 34 else: 35 print( 36 f"Error{plural} logged during startup, exiting...", file=sys.stderr 37 ) 38 39 sys.exit(1) 40 41 42 class ErrorCheckHandler(log.MitmLogHandler): 43 def __init__(self) -> None: 44 super().__init__(logging.ERROR) 45 self.has_errored: list[logging.LogRecord] = [] 46 47 def emit(self, record: logging.LogRecord) -> None: 48 self.has_errored.append(record) 49 [end of mitmproxy/addons/errorcheck.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mitmproxy/addons/errorcheck.py b/mitmproxy/addons/errorcheck.py --- a/mitmproxy/addons/errorcheck.py +++ b/mitmproxy/addons/errorcheck.py @@ -3,6 +3,8 @@ import sys from mitmproxy import log +from mitmproxy.contrib import click as miniclick +from mitmproxy.utils import vt_codes class ErrorCheck: @@ -29,8 +31,13 @@ if self.logger.has_errored: plural = "s" if len(self.logger.has_errored) > 1 else "" if self.repeat_errors_on_stderr: - msg = "\n".join(self.logger.format(r) for r in self.logger.has_errored) - print(f"Error{plural} logged during startup:\n{msg}", file=sys.stderr) + message = f"Error{plural} logged during startup:" + if vt_codes.ensure_supported(sys.stderr): # pragma: no cover + message = miniclick.style(message, fg="red") + details = "\n".join( + self.logger.format(r) for r in self.logger.has_errored + ) + print(f"{message}\n{details}", file=sys.stderr) else: print( f"Error{plural} logged during startup, exiting...", file=sys.stderr diff --git a/mitmproxy/master.py b/mitmproxy/master.py --- a/mitmproxy/master.py +++ b/mitmproxy/master.py @@ -58,6 +58,7 @@ ): self.should_exit.clear() + # Can we exit before even bringing up servers? if ec := self.addons.get("errorcheck"): await ec.shutdown_if_errored() if ps := self.addons.get("proxyserver"): @@ -69,14 +70,23 @@ ], return_when=asyncio.FIRST_COMPLETED, ) - await self.running() - if ec := self.addons.get("errorcheck"): - await ec.shutdown_if_errored() - ec.finish() + if self.should_exit.is_set(): + return + # Did bringing up servers fail? + if ec := self.addons.get("errorcheck"): + await ec.shutdown_if_errored() + try: + await self.running() + # Any errors in the final part of startup? + if ec := self.addons.get("errorcheck"): + await ec.shutdown_if_errored() + ec.finish() + await self.should_exit.wait() finally: - # .wait might be cancelled (e.g. by sys.exit) + # if running() was called, we also always want to call done(). + # .wait might be cancelled (e.g. by sys.exit), so this needs to be in a finally block. await self.done() def shutdown(self):
{"golden_diff": "diff --git a/mitmproxy/addons/errorcheck.py b/mitmproxy/addons/errorcheck.py\n--- a/mitmproxy/addons/errorcheck.py\n+++ b/mitmproxy/addons/errorcheck.py\n@@ -3,6 +3,8 @@\n import sys\n \n from mitmproxy import log\n+from mitmproxy.contrib import click as miniclick\n+from mitmproxy.utils import vt_codes\n \n \n class ErrorCheck:\n@@ -29,8 +31,13 @@\n if self.logger.has_errored:\n plural = \"s\" if len(self.logger.has_errored) > 1 else \"\"\n if self.repeat_errors_on_stderr:\n- msg = \"\\n\".join(self.logger.format(r) for r in self.logger.has_errored)\n- print(f\"Error{plural} logged during startup:\\n{msg}\", file=sys.stderr)\n+ message = f\"Error{plural} logged during startup:\"\n+ if vt_codes.ensure_supported(sys.stderr): # pragma: no cover\n+ message = miniclick.style(message, fg=\"red\")\n+ details = \"\\n\".join(\n+ self.logger.format(r) for r in self.logger.has_errored\n+ )\n+ print(f\"{message}\\n{details}\", file=sys.stderr)\n else:\n print(\n f\"Error{plural} logged during startup, exiting...\", file=sys.stderr\ndiff --git a/mitmproxy/master.py b/mitmproxy/master.py\n--- a/mitmproxy/master.py\n+++ b/mitmproxy/master.py\n@@ -58,6 +58,7 @@\n ):\n self.should_exit.clear()\n \n+ # Can we exit before even bringing up servers?\n if ec := self.addons.get(\"errorcheck\"):\n await ec.shutdown_if_errored()\n if ps := self.addons.get(\"proxyserver\"):\n@@ -69,14 +70,23 @@\n ],\n return_when=asyncio.FIRST_COMPLETED,\n )\n- await self.running()\n- if ec := self.addons.get(\"errorcheck\"):\n- await ec.shutdown_if_errored()\n- ec.finish()\n+ if self.should_exit.is_set():\n+ return\n+ # Did bringing up servers fail?\n+ if ec := self.addons.get(\"errorcheck\"):\n+ await ec.shutdown_if_errored()\n+\n try:\n+ await self.running()\n+ # Any errors in the final part of startup?\n+ if ec := self.addons.get(\"errorcheck\"):\n+ await ec.shutdown_if_errored()\n+ ec.finish()\n+\n await self.should_exit.wait()\n finally:\n- # .wait might be cancelled (e.g. by sys.exit)\n+ # if running() was called, we also always want to call done().\n+ # .wait might be cancelled (e.g. by sys.exit), so this needs to be in a finally block.\n await self.done()\n \n def shutdown(self):\n", "issue": "The mitmproxy program failed to start because the default port 8080 was occupied.\n#### Problem Description\r\nBecause the default port 8080 is occupied, the mitmproxy program fails to start, and there is no output reason for the failure.\r\n\r\n#### Steps to reproduce the behavior:\r\n1. Listen on port 8080 using the nc command in a terminal window.\r\n2. Start the mitmproxy program in another terminal window.\r\n3. The mitmproxy program failed to start, and there was no output reason for the failure, and the normal terminal configuration was not restored.\r\n![image](https://github.com/mitmproxy/mitmproxy/assets/32811372/e8333100-fceb-4fdb-b781-829a9ce89945)\r\n\r\n\r\n\r\n#### System Information\r\nMitmproxy: 10.2.3 binary\r\nPython: 3.12.2\r\nOpenSSL: OpenSSL 3.2.1 30 Jan 2024\r\nPlatform: macOS-14.2.1-arm64-arm-64bit\r\n\n", "before_files": [{"content": "import asyncio\nimport logging\n\nfrom . 
import ctx as mitmproxy_ctx\nfrom .addons import termlog\nfrom .proxy.mode_specs import ReverseMode\nfrom .utils import asyncio_utils\nfrom mitmproxy import addonmanager\nfrom mitmproxy import command\nfrom mitmproxy import eventsequence\nfrom mitmproxy import hooks\nfrom mitmproxy import http\nfrom mitmproxy import log\nfrom mitmproxy import options\n\nlogger = logging.getLogger(__name__)\n\n\nclass Master:\n \"\"\"\n The master handles mitmproxy's main event loop.\n \"\"\"\n\n event_loop: asyncio.AbstractEventLoop\n _termlog_addon: termlog.TermLog | None = None\n\n def __init__(\n self,\n opts: options.Options,\n event_loop: asyncio.AbstractEventLoop | None = None,\n with_termlog: bool = False,\n ):\n self.options: options.Options = opts or options.Options()\n self.commands = command.CommandManager(self)\n self.addons = addonmanager.AddonManager(self)\n\n if with_termlog:\n self._termlog_addon = termlog.TermLog()\n self.addons.add(self._termlog_addon)\n\n self.log = log.Log(self) # deprecated, do not use.\n self._legacy_log_events = log.LegacyLogEvents(self)\n self._legacy_log_events.install()\n\n # We expect an active event loop here already because some addons\n # may want to spawn tasks during the initial configuration phase,\n # which happens before run().\n self.event_loop = event_loop or asyncio.get_running_loop()\n self.should_exit = asyncio.Event()\n mitmproxy_ctx.master = self\n mitmproxy_ctx.log = self.log # deprecated, do not use.\n mitmproxy_ctx.options = self.options\n\n async def run(self) -> None:\n with (\n asyncio_utils.install_exception_handler(self._asyncio_exception_handler),\n asyncio_utils.set_eager_task_factory(),\n ):\n self.should_exit.clear()\n\n if ec := self.addons.get(\"errorcheck\"):\n await ec.shutdown_if_errored()\n if ps := self.addons.get(\"proxyserver\"):\n # This may block for some proxy modes, so we also monitor should_exit.\n await asyncio.wait(\n [\n asyncio.create_task(ps.setup_servers()),\n asyncio.create_task(self.should_exit.wait()),\n ],\n return_when=asyncio.FIRST_COMPLETED,\n )\n await self.running()\n if ec := self.addons.get(\"errorcheck\"):\n await ec.shutdown_if_errored()\n ec.finish()\n try:\n await self.should_exit.wait()\n finally:\n # .wait might be cancelled (e.g. by sys.exit)\n await self.done()\n\n def shutdown(self):\n \"\"\"\n Shut down the proxy. This method is thread-safe.\n \"\"\"\n # We may add an exception argument here.\n self.event_loop.call_soon_threadsafe(self.should_exit.set)\n\n async def running(self) -> None:\n await self.addons.trigger_event(hooks.RunningHook())\n\n async def done(self) -> None:\n await self.addons.trigger_event(hooks.DoneHook())\n self._legacy_log_events.uninstall()\n if self._termlog_addon is not None:\n self._termlog_addon.uninstall()\n\n def _asyncio_exception_handler(self, loop, context) -> None:\n try:\n exc: Exception = context[\"exception\"]\n except KeyError:\n logger.error(f\"Unhandled asyncio error: {context}\")\n else:\n if isinstance(exc, OSError) and exc.errno == 10038:\n return # suppress https://bugs.python.org/issue43253\n logger.error(\n \"Unhandled error in task.\",\n exc_info=(type(exc), exc, exc.__traceback__),\n )\n\n async def load_flow(self, f):\n \"\"\"\n Loads a flow\n \"\"\"\n\n if (\n isinstance(f, http.HTTPFlow)\n and len(self.options.mode) == 1\n and self.options.mode[0].startswith(\"reverse:\")\n ):\n # When we load flows in reverse proxy mode, we adjust the target host to\n # the reverse proxy destination for all flows we load. 
This makes it very\n # easy to replay saved flows against a different host.\n # We may change this in the future so that clientplayback always replays to the first mode.\n mode = ReverseMode.parse(self.options.mode[0])\n assert isinstance(mode, ReverseMode)\n f.request.host, f.request.port, *_ = mode.address\n f.request.scheme = mode.scheme\n\n for e in eventsequence.iterate(f):\n await self.addons.handle_lifecycle(e)\n", "path": "mitmproxy/master.py"}, {"content": "import asyncio\nimport logging\nimport sys\n\nfrom mitmproxy import log\n\n\nclass ErrorCheck:\n \"\"\"Monitor startup for error log entries, and terminate immediately if there are some.\"\"\"\n\n repeat_errors_on_stderr: bool\n \"\"\"\n Repeat all errors on stderr before exiting.\n This is useful for the console UI, which otherwise swallows all output.\n \"\"\"\n\n def __init__(self, repeat_errors_on_stderr: bool = False) -> None:\n self.repeat_errors_on_stderr = repeat_errors_on_stderr\n\n self.logger = ErrorCheckHandler()\n self.logger.install()\n\n def finish(self):\n self.logger.uninstall()\n\n async def shutdown_if_errored(self):\n # don't run immediately, wait for all logging tasks to finish.\n await asyncio.sleep(0)\n if self.logger.has_errored:\n plural = \"s\" if len(self.logger.has_errored) > 1 else \"\"\n if self.repeat_errors_on_stderr:\n msg = \"\\n\".join(self.logger.format(r) for r in self.logger.has_errored)\n print(f\"Error{plural} logged during startup:\\n{msg}\", file=sys.stderr)\n else:\n print(\n f\"Error{plural} logged during startup, exiting...\", file=sys.stderr\n )\n\n sys.exit(1)\n\n\nclass ErrorCheckHandler(log.MitmLogHandler):\n def __init__(self) -> None:\n super().__init__(logging.ERROR)\n self.has_errored: list[logging.LogRecord] = []\n\n def emit(self, record: logging.LogRecord) -> None:\n self.has_errored.append(record)\n", "path": "mitmproxy/addons/errorcheck.py"}]}
2,532
635
gh_patches_debug_214
rasdani/github-patches
git_diff
DDMAL__CantusDB-328
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Representation of Notation objects When I recently tried to edit a source, I was presented with an error message, and found that I was missing several required fields, including this one: ![Screen Shot 2022-09-06 at 08 59 56](https://user-images.githubusercontent.com/58090591/188641630-13f87954-0f92-405f-a561-effe3075a3db.png) Notation objects are currently pretty inscrutable. They should be represented such that at least their `name` property is visible. Larger question: why do we have notation objects at all? Currently, the notation model has only one property: `name`. Could this information in Source objects not be more simply represented by a CharField? Is using Notation objects simply the way things were done in OldCantus? Are we using them to ensure standardization among multiple Sources? </issue> <code> [start of django/cantusdb_project/main_app/models/notation.py] 1 from django.db import models 2 from main_app.models import BaseModel 3 4 5 class Notation(BaseModel): 6 name = models.CharField(max_length=63) 7 [end of django/cantusdb_project/main_app/models/notation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django/cantusdb_project/main_app/models/notation.py b/django/cantusdb_project/main_app/models/notation.py --- a/django/cantusdb_project/main_app/models/notation.py +++ b/django/cantusdb_project/main_app/models/notation.py @@ -4,3 +4,5 @@ class Notation(BaseModel): name = models.CharField(max_length=63) + def __str__(self): + return f"{self.name} ({self.id})" \ No newline at end of file
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/models/notation.py b/django/cantusdb_project/main_app/models/notation.py\n--- a/django/cantusdb_project/main_app/models/notation.py\n+++ b/django/cantusdb_project/main_app/models/notation.py\n@@ -4,3 +4,5 @@\n \n class Notation(BaseModel):\n name = models.CharField(max_length=63)\n+ def __str__(self):\n+ return f\"{self.name} ({self.id})\"\n\\ No newline at end of file\n", "issue": "Representation of Notation objects\nWhen I recently tried to edit a source, I was presented with an error message, and found that I was missing several required fields, including this one: ![Screen Shot 2022-09-06 at 08 59 56](https://user-images.githubusercontent.com/58090591/188641630-13f87954-0f92-405f-a561-effe3075a3db.png)\r\nNotation objects are currently pretty inscrutable. They should be represented such that at least their `name` property is visible.\r\n\r\nLarger question: why do we have notation objects at all? Currently, the notation model has only one property: `name`. Could this information in Source objects not be more simply represented by a CharField? Is using Notation objects simply the way things were done in OldCantus? Are we using them to ensure standardization among multiple Sources?\n", "before_files": [{"content": "from django.db import models\nfrom main_app.models import BaseModel\n\n\nclass Notation(BaseModel):\n name = models.CharField(max_length=63)\n", "path": "django/cantusdb_project/main_app/models/notation.py"}]}
806
120
gh_patches_debug_19368
rasdani/github-patches
git_diff
sopel-irc__sopel-610
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remind is case sensitive [22:20] <Eyore> .in 3SEC stuff [22:20] @Willie Eyore: Sorry, didn't understand the input. [22:20] <Eyore> .in 3 SEC stuff [22:20] @Willie Eyore: Sorry, didn't understand the input. [22:20] <Eyore> .in 3 S stuff [22:20] @Willie Eyore: Sorry, didn't understand the input. [22:20] <Eyore> .in 3 MIN stuff [22:20] @Willie Eyore: Sorry, didn't understand the input. [22:20] <Eyore> .in 3 Min stuff [22:20] @Willie Eyore: Sorry, didn't understand the input. I'd fix it myself, but looking at the code, I'm not sure where to put the .lower() </issue> <code> [start of willie/modules/remind.py] 1 # coding=utf8 2 """ 3 remind.py - Willie Reminder Module 4 Copyright 2011, Sean B. Palmer, inamidst.com 5 Licensed under the Eiffel Forum License 2. 6 7 http://willie.dftba.net 8 """ 9 from __future__ import unicode_literals 10 11 import os 12 import re 13 import time 14 import threading 15 import collections 16 import codecs 17 from datetime import datetime 18 from willie.module import commands, example, NOLIMIT 19 import willie.tools 20 21 try: 22 import pytz 23 except: 24 pytz = None 25 26 def filename(self): 27 name = self.nick + '-' + self.config.host + '.reminders.db' 28 return os.path.join(self.config.dotdir, name) 29 30 31 def load_database(name): 32 data = {} 33 if os.path.isfile(name): 34 f = codecs.open(name, 'r', encoding='utf-8') 35 for line in f: 36 unixtime, channel, nick, message = line.split('\t') 37 message = message.rstrip('\n') 38 t = int(float(unixtime)) # WTFs going on here? 39 reminder = (channel, nick, message) 40 try: 41 data[t].append(reminder) 42 except KeyError: 43 data[t] = [reminder] 44 f.close() 45 return data 46 47 48 def dump_database(name, data): 49 f = codecs.open(name, 'w', encoding='utf-8') 50 for unixtime, reminders in willie.tools.iteritems(data): 51 for channel, nick, message in reminders: 52 f.write('%s\t%s\t%s\t%s\n' % (unixtime, channel, nick, message)) 53 f.close() 54 55 56 def setup(bot): 57 bot.rfn = filename(bot) 58 bot.rdb = load_database(bot.rfn) 59 60 def monitor(bot): 61 time.sleep(5) 62 while True: 63 now = int(time.time()) 64 unixtimes = [int(key) for key in bot.rdb] 65 oldtimes = [t for t in unixtimes if t <= now] 66 if oldtimes: 67 for oldtime in oldtimes: 68 for (channel, nick, message) in bot.rdb[oldtime]: 69 if message: 70 bot.msg(channel, nick + ': ' + message) 71 else: 72 bot.msg(channel, nick + '!') 73 del bot.rdb[oldtime] 74 dump_database(bot.rfn, bot.rdb) 75 time.sleep(2.5) 76 77 targs = (bot,) 78 t = threading.Thread(target=monitor, args=targs) 79 t.start() 80 81 scaling = collections.OrderedDict([ 82 ('years', 365.25 * 24 * 3600), 83 ('year', 365.25 * 24 * 3600), 84 ('yrs', 365.25 * 24 * 3600), 85 ('y', 365.25 * 24 * 3600), 86 87 ('months', 29.53059 * 24 * 3600), 88 ('month', 29.53059 * 24 * 3600), 89 ('mo', 29.53059 * 24 * 3600), 90 91 ('weeks', 7 * 24 * 3600), 92 ('week', 7 * 24 * 3600), 93 ('wks', 7 * 24 * 3600), 94 ('wk', 7 * 24 * 3600), 95 ('w', 7 * 24 * 3600), 96 97 ('days', 24 * 3600), 98 ('day', 24 * 3600), 99 ('d', 24 * 3600), 100 101 ('hours', 3600), 102 ('hour', 3600), 103 ('hrs', 3600), 104 ('hr', 3600), 105 ('h', 3600), 106 107 ('minutes', 60), 108 ('minute', 60), 109 ('mins', 60), 110 ('min', 60), 111 ('m', 60), 112 113 ('seconds', 1), 114 ('second', 1), 115 ('secs', 1), 116 ('sec', 1), 117 ('s', 1), 118 ]) 119 120 periods = '|'.join(scaling.keys()) 121 122 123 @commands('in') 124 @example('.in 3h45m 
Go to class') 125 def remind(bot, trigger): 126 """Gives you a reminder in the given amount of time.""" 127 duration = 0 128 message = filter(None, re.split('(\d+(?:\.\d+)? ?(?:' + periods + ')) ?', 129 trigger.group(2))[1:]) 130 reminder = '' 131 stop = False 132 for piece in message: 133 grp = re.match('(\d+(?:\.\d+)?) ?(.*) ?', piece) 134 if grp and not stop: 135 length = float(grp.group(1)) 136 factor = scaling.get(grp.group(2), 60) 137 duration += length * factor 138 else: 139 reminder = reminder + piece 140 stop = True 141 if duration == 0: 142 return bot.reply("Sorry, didn't understand the input.") 143 144 if duration % 1: 145 duration = int(duration) + 1 146 else: 147 duration = int(duration) 148 timezone = willie.tools.get_timezone( 149 bot.db, bot.config, None, trigger.nick, trigger.sender) 150 create_reminder(bot, trigger, duration, reminder, timezone) 151 152 153 @commands('at') 154 @example('.at 13:47 Do your homework!') 155 def at(bot, trigger): 156 """ 157 Gives you a reminder at the given time. Takes hh:mm:ssTimezone 158 message. Timezone is any timezone Willie takes elsewhere; the best choices 159 are those from the tzdb; a list of valid options is available at 160 http://dft.ba/-tz . The seconds and timezone are optional. 161 """ 162 regex = re.compile(r'(\d+):(\d+)(?::(\d+))?([^\s\d]+)? (.*)') 163 match = regex.match(trigger.group(2)) 164 if not match: 165 bot.reply("Sorry, but I didn't understand your input.") 166 return NOLIMIT 167 hour, minute, second, tz, message = match.groups() 168 if not second: 169 second = '0' 170 171 if pytz: 172 timezone = willie.tools.get_timezone(bot.db, bot.config, tz, 173 trigger.nick, trigger.sender) 174 if not timezone: 175 timezone = 'UTC' 176 now = datetime.now(pytz.timezone(timezone)) 177 at_time = datetime(now.year, now.month, now.day, 178 int(hour), int(minute), int(second), 179 tzinfo=now.tzinfo) 180 timediff = at_time - now 181 else: 182 if tz and tz.upper() != 'UTC': 183 bot.reply("I don't have timzeone support installed.") 184 return NOLIMIT 185 now = datetime.now() 186 at_time = datetime(now.year, now.month, now.day, 187 int(hour), int(minute), int(second)) 188 timediff = at_time - now 189 190 duration = timediff.seconds 191 192 if duration < 0: 193 duration += 86400 194 create_reminder(bot, trigger, duration, message, 'UTC') 195 196 197 def create_reminder(bot, trigger, duration, message, tz): 198 t = int(time.time()) + duration 199 reminder = (trigger.sender, trigger.nick, message) 200 try: 201 bot.rdb[t].append(reminder) 202 except KeyError: 203 bot.rdb[t] = [reminder] 204 205 dump_database(bot.rfn, bot.rdb) 206 207 if duration >= 60: 208 remind_at = datetime.utcfromtimestamp(t) 209 timef = willie.tools.format_time(bot.db, bot.config, tz, trigger.nick, 210 trigger.sender, remind_at) 211 212 bot.reply('Okay, will remind at %s' % timef) 213 else: 214 bot.reply('Okay, will remind in %s secs' % duration) 215 [end of willie/modules/remind.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/willie/modules/remind.py b/willie/modules/remind.py --- a/willie/modules/remind.py +++ b/willie/modules/remind.py @@ -125,7 +125,7 @@ def remind(bot, trigger): """Gives you a reminder in the given amount of time.""" duration = 0 - message = filter(None, re.split('(\d+(?:\.\d+)? ?(?:' + periods + ')) ?', + message = filter(None, re.split('(\d+(?:\.\d+)? ?(?:(?i)' + periods + ')) ?', trigger.group(2))[1:]) reminder = '' stop = False @@ -133,7 +133,7 @@ grp = re.match('(\d+(?:\.\d+)?) ?(.*) ?', piece) if grp and not stop: length = float(grp.group(1)) - factor = scaling.get(grp.group(2), 60) + factor = scaling.get(grp.group(2).lower(), 60) duration += length * factor else: reminder = reminder + piece
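A minimal, self-contained sketch (not code from the Willie repository) of the behavior the patch above targets: the duration regex is matched case-insensitively and the unit string is lower-cased before the scaling lookup, so inputs like `.in 3SEC stuff` resolve correctly. The small `scaling` table and the `parse_duration` helper are illustrative stand-ins for the module's much larger table and command handler, and `flags=re.IGNORECASE` is used here in place of the patch's inline `(?i)` group, which has the same effect.

```python
import re

# Abridged stand-in for the module's scaling table, listed longest-alias-first so the
# regex alternation prefers the longer unit names (the real table has many more entries).
scaling = {"secs": 1, "sec": 1, "s": 1, "mins": 60, "min": 60, "m": 60, "hr": 3600, "h": 3600}
periods = "|".join(scaling)

def parse_duration(text):
    # Split off "<number><unit>" chunks case-insensitively; re.IGNORECASE plays the
    # same role as the inline (?i) group added by the patch.
    pieces = [p for p in re.split(r"(\d+(?:\.\d+)? ?(?:" + periods + r")) ?",
                                  text, flags=re.IGNORECASE) if p]
    duration = 0.0
    for piece in pieces:
        grp = re.match(r"(\d+(?:\.\d+)?) ?(.*) ?", piece)
        if grp:
            # Lower-case the unit before the table lookup, as the patch does.
            duration += float(grp.group(1)) * scaling.get(grp.group(2).lower(), 60)
    return duration

print(parse_duration("3SEC"))    # 3.0 -- the upper-case unit is now recognized
print(parse_duration("3h45m"))   # 13500.0, i.e. 3 hours 45 minutes in seconds
```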
{"golden_diff": "diff --git a/willie/modules/remind.py b/willie/modules/remind.py\n--- a/willie/modules/remind.py\n+++ b/willie/modules/remind.py\n@@ -125,7 +125,7 @@\n def remind(bot, trigger):\n \"\"\"Gives you a reminder in the given amount of time.\"\"\"\n duration = 0\n- message = filter(None, re.split('(\\d+(?:\\.\\d+)? ?(?:' + periods + ')) ?',\n+ message = filter(None, re.split('(\\d+(?:\\.\\d+)? ?(?:(?i)' + periods + ')) ?',\n trigger.group(2))[1:])\n reminder = ''\n stop = False\n@@ -133,7 +133,7 @@\n grp = re.match('(\\d+(?:\\.\\d+)?) ?(.*) ?', piece)\n if grp and not stop:\n length = float(grp.group(1))\n- factor = scaling.get(grp.group(2), 60)\n+ factor = scaling.get(grp.group(2).lower(), 60)\n duration += length * factor\n else:\n reminder = reminder + piece\n", "issue": "Remind is case sensitive\n[22:20] <Eyore> .in 3SEC stuff\n[22:20] @Willie Eyore: Sorry, didn't understand the input.\n[22:20] <Eyore> .in 3 SEC stuff\n[22:20] @Willie Eyore: Sorry, didn't understand the input.\n[22:20] <Eyore> .in 3 S stuff\n[22:20] @Willie Eyore: Sorry, didn't understand the input.\n[22:20] <Eyore> .in 3 MIN stuff\n[22:20] @Willie Eyore: Sorry, didn't understand the input.\n[22:20] <Eyore> .in 3 Min stuff\n[22:20] @Willie Eyore: Sorry, didn't understand the input.\n\nI'd fix it myself, but looking at the code, I'm not sure where to put the .lower()\n\n", "before_files": [{"content": "# coding=utf8\n\"\"\"\nremind.py - Willie Reminder Module\nCopyright 2011, Sean B. Palmer, inamidst.com\nLicensed under the Eiffel Forum License 2.\n\nhttp://willie.dftba.net\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport os\nimport re\nimport time\nimport threading\nimport collections\nimport codecs\nfrom datetime import datetime\nfrom willie.module import commands, example, NOLIMIT\nimport willie.tools\n\ntry:\n import pytz\nexcept:\n pytz = None\n\ndef filename(self):\n name = self.nick + '-' + self.config.host + '.reminders.db'\n return os.path.join(self.config.dotdir, name)\n\n\ndef load_database(name):\n data = {}\n if os.path.isfile(name):\n f = codecs.open(name, 'r', encoding='utf-8')\n for line in f:\n unixtime, channel, nick, message = line.split('\\t')\n message = message.rstrip('\\n')\n t = int(float(unixtime)) # WTFs going on here?\n reminder = (channel, nick, message)\n try:\n data[t].append(reminder)\n except KeyError:\n data[t] = [reminder]\n f.close()\n return data\n\n\ndef dump_database(name, data):\n f = codecs.open(name, 'w', encoding='utf-8')\n for unixtime, reminders in willie.tools.iteritems(data):\n for channel, nick, message in reminders:\n f.write('%s\\t%s\\t%s\\t%s\\n' % (unixtime, channel, nick, message))\n f.close()\n\n\ndef setup(bot):\n bot.rfn = filename(bot)\n bot.rdb = load_database(bot.rfn)\n\n def monitor(bot):\n time.sleep(5)\n while True:\n now = int(time.time())\n unixtimes = [int(key) for key in bot.rdb]\n oldtimes = [t for t in unixtimes if t <= now]\n if oldtimes:\n for oldtime in oldtimes:\n for (channel, nick, message) in bot.rdb[oldtime]:\n if message:\n bot.msg(channel, nick + ': ' + message)\n else:\n bot.msg(channel, nick + '!')\n del bot.rdb[oldtime]\n dump_database(bot.rfn, bot.rdb)\n time.sleep(2.5)\n\n targs = (bot,)\n t = threading.Thread(target=monitor, args=targs)\n t.start()\n\nscaling = collections.OrderedDict([\n ('years', 365.25 * 24 * 3600),\n ('year', 365.25 * 24 * 3600),\n ('yrs', 365.25 * 24 * 3600),\n ('y', 365.25 * 24 * 3600),\n\n ('months', 29.53059 * 24 * 3600),\n ('month', 29.53059 * 24 * 3600),\n ('mo', 29.53059 * 24 * 3600),\n\n 
('weeks', 7 * 24 * 3600),\n ('week', 7 * 24 * 3600),\n ('wks', 7 * 24 * 3600),\n ('wk', 7 * 24 * 3600),\n ('w', 7 * 24 * 3600),\n\n ('days', 24 * 3600),\n ('day', 24 * 3600),\n ('d', 24 * 3600),\n\n ('hours', 3600),\n ('hour', 3600),\n ('hrs', 3600),\n ('hr', 3600),\n ('h', 3600),\n\n ('minutes', 60),\n ('minute', 60),\n ('mins', 60),\n ('min', 60),\n ('m', 60),\n\n ('seconds', 1),\n ('second', 1),\n ('secs', 1),\n ('sec', 1),\n ('s', 1),\n])\n\nperiods = '|'.join(scaling.keys())\n\n\n@commands('in')\n@example('.in 3h45m Go to class')\ndef remind(bot, trigger):\n \"\"\"Gives you a reminder in the given amount of time.\"\"\"\n duration = 0\n message = filter(None, re.split('(\\d+(?:\\.\\d+)? ?(?:' + periods + ')) ?',\n trigger.group(2))[1:])\n reminder = ''\n stop = False\n for piece in message:\n grp = re.match('(\\d+(?:\\.\\d+)?) ?(.*) ?', piece)\n if grp and not stop:\n length = float(grp.group(1))\n factor = scaling.get(grp.group(2), 60)\n duration += length * factor\n else:\n reminder = reminder + piece\n stop = True\n if duration == 0:\n return bot.reply(\"Sorry, didn't understand the input.\")\n\n if duration % 1:\n duration = int(duration) + 1\n else:\n duration = int(duration)\n timezone = willie.tools.get_timezone(\n bot.db, bot.config, None, trigger.nick, trigger.sender)\n create_reminder(bot, trigger, duration, reminder, timezone)\n\n\n@commands('at')\n@example('.at 13:47 Do your homework!')\ndef at(bot, trigger):\n \"\"\"\n Gives you a reminder at the given time. Takes hh:mm:ssTimezone\n message. Timezone is any timezone Willie takes elsewhere; the best choices\n are those from the tzdb; a list of valid options is available at\n http://dft.ba/-tz . The seconds and timezone are optional.\n \"\"\"\n regex = re.compile(r'(\\d+):(\\d+)(?::(\\d+))?([^\\s\\d]+)? (.*)')\n match = regex.match(trigger.group(2))\n if not match:\n bot.reply(\"Sorry, but I didn't understand your input.\")\n return NOLIMIT\n hour, minute, second, tz, message = match.groups()\n if not second:\n second = '0'\n\n if pytz:\n timezone = willie.tools.get_timezone(bot.db, bot.config, tz,\n trigger.nick, trigger.sender)\n if not timezone:\n timezone = 'UTC'\n now = datetime.now(pytz.timezone(timezone))\n at_time = datetime(now.year, now.month, now.day,\n int(hour), int(minute), int(second),\n tzinfo=now.tzinfo)\n timediff = at_time - now\n else:\n if tz and tz.upper() != 'UTC':\n bot.reply(\"I don't have timzeone support installed.\")\n return NOLIMIT\n now = datetime.now()\n at_time = datetime(now.year, now.month, now.day,\n int(hour), int(minute), int(second))\n timediff = at_time - now\n\n duration = timediff.seconds\n\n if duration < 0:\n duration += 86400\n create_reminder(bot, trigger, duration, message, 'UTC')\n\n\ndef create_reminder(bot, trigger, duration, message, tz):\n t = int(time.time()) + duration\n reminder = (trigger.sender, trigger.nick, message)\n try:\n bot.rdb[t].append(reminder)\n except KeyError:\n bot.rdb[t] = [reminder]\n\n dump_database(bot.rfn, bot.rdb)\n\n if duration >= 60:\n remind_at = datetime.utcfromtimestamp(t)\n timef = willie.tools.format_time(bot.db, bot.config, tz, trigger.nick,\n trigger.sender, remind_at)\n\n bot.reply('Okay, will remind at %s' % timef)\n else:\n bot.reply('Okay, will remind in %s secs' % duration)\n", "path": "willie/modules/remind.py"}]}
3,091
261
gh_patches_debug_42849
rasdani/github-patches
git_diff
ray-project__ray-9386
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [rllib] incorrect model output for DQN with torch and dueling=false <!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant--> ### What is the problem? The output fo the DQN model is not within the action space. Something is wrong when constructing the torch model when dueling is off. The output dimension of the model is equal to whatever is passed in "fcnet_hiddens" instead of being of the size of the action space. *Ray version and other system information (Python version, TensorFlow version, OS):* - ray==0.9.0.dev0 - python 3.6.10 - mac OS ### Reproduction (REQUIRED) ```python import ray from ray import tune ray.init() config = { "env": "CartPole-v1", "num_workers": 1, "train_batch_size": 128, "learning_starts": 128, "model": {"fcnet_hiddens": [32]}, "dueling": False , "framework": "torch" } tune.run("DQN", name="MWE", config=config, stop={"training_iteration": 100}) ``` - [x] I have verified my script runs in a clean environment and reproduces the issue. - [x] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html). </issue> <code> [start of rllib/agents/dqn/dqn_torch_model.py] 1 import numpy as np 2 3 from ray.rllib.models.torch.torch_modelv2 import TorchModelV2 4 from ray.rllib.utils.framework import try_import_torch 5 6 torch, nn = try_import_torch() 7 8 9 class DQNTorchModel(TorchModelV2, nn.Module): 10 """Extension of standard TorchModelV2 to provide dueling-Q functionality. 11 """ 12 13 def __init__( 14 self, 15 obs_space, 16 action_space, 17 num_outputs, 18 model_config, 19 name, 20 *, 21 dueling=False, 22 q_hiddens=(256, ), 23 dueling_activation="relu", 24 use_noisy=False, 25 sigma0=0.5, 26 # TODO(sven): Move `add_layer_norm` into ModelCatalog as 27 # generic option, then error if we use ParameterNoise as 28 # Exploration type and do not have any LayerNorm layers in 29 # the net. 30 add_layer_norm=False): 31 """Initialize variables of this model. 32 33 Extra model kwargs: 34 dueling (bool): Whether to build the advantage(A)/value(V) heads 35 for DDQN. If True, Q-values are calculated as: 36 Q = (A - mean[A]) + V. If False, raw NN output is interpreted 37 as Q-values. 38 q_hiddens (List[int]): List of layer-sizes after(!) the 39 Advantages(A)/Value(V)-split. Hence, each of the A- and V- 40 branches will have this structure of Dense layers. To define 41 the NN before this A/V-split, use - as always - 42 config["model"]["fcnet_hiddens"]. 43 dueling_activation (str): The activation to use for all dueling 44 layers (A- and V-branch). One of "relu", "tanh", "linear". 45 use_noisy (bool): use noisy nets 46 sigma0 (float): initial value of noisy nets 47 add_layer_norm (bool): Enable layer norm (for param noise). 48 """ 49 nn.Module.__init__(self) 50 super(DQNTorchModel, self).__init__(obs_space, action_space, 51 num_outputs, model_config, name) 52 53 self.dueling = dueling 54 ins = num_outputs 55 56 # Dueling case: Build the shared (advantages and value) fc-network. 57 advantage_module = nn.Sequential() 58 value_module = None 59 if self.dueling: 60 value_module = nn.Sequential() 61 for i, n in enumerate(q_hiddens): 62 advantage_module.add_module("dueling_A_{}".format(i), 63 nn.Linear(ins, n)) 64 value_module.add_module("dueling_V_{}".format(i), 65 nn.Linear(ins, n)) 66 # Add activations if necessary. 
67 if dueling_activation == "relu": 68 advantage_module.add_module("dueling_A_act_{}".format(i), 69 nn.ReLU()) 70 value_module.add_module("dueling_V_act_{}".format(i), 71 nn.ReLU()) 72 elif dueling_activation == "tanh": 73 advantage_module.add_module("dueling_A_act_{}".format(i), 74 nn.Tanh()) 75 value_module.add_module("dueling_V_act_{}".format(i), 76 nn.Tanh()) 77 78 # Add LayerNorm after each Dense. 79 if add_layer_norm: 80 advantage_module.add_module("LayerNorm_A_{}".format(i), 81 nn.LayerNorm(n)) 82 value_module.add_module("LayerNorm_V_{}".format(i), 83 nn.LayerNorm(n)) 84 ins = n 85 # Actual Advantages layer (nodes=num-actions) and 86 # value layer (nodes=1). 87 advantage_module.add_module("A", nn.Linear(ins, action_space.n)) 88 value_module.add_module("V", nn.Linear(ins, 1)) 89 # Non-dueling: 90 # Q-value layer (use main module's outputs as Q-values). 91 else: 92 pass 93 94 self.advantage_module = advantage_module 95 self.value_module = value_module 96 97 def get_advantages_or_q_values(self, model_out): 98 """Returns distributional values for Q(s, a) given a state embedding. 99 100 Override this in your custom model to customize the Q output head. 101 102 Arguments: 103 model_out (Tensor): embedding from the model layers 104 105 Returns: 106 (action_scores, logits, dist) if num_atoms == 1, otherwise 107 (action_scores, z, support_logits_per_action, logits, dist) 108 """ 109 110 return self.advantage_module(model_out) 111 112 def get_state_value(self, model_out): 113 """Returns the state value prediction for the given state embedding.""" 114 115 return self.value_module(model_out) 116 117 def _noisy_layer(self, action_in, out_size, sigma0, non_linear=True): 118 """ 119 a common dense layer: y = w^{T}x + b 120 a noisy layer: y = (w + \\epsilon_w*\\sigma_w)^{T}x + 121 (b+\\epsilon_b*\\sigma_b) 122 where \epsilon are random variables sampled from factorized normal 123 distributions and \\sigma are trainable variables which are expected to 124 vanish along the training procedure 125 """ 126 in_size = int(action_in.shape[1]) 127 128 epsilon_in = torch.normal( 129 mean=torch.zeros([in_size]), std=torch.ones([in_size])) 130 epsilon_out = torch.normal( 131 mean=torch.zeros([out_size]), std=torch.ones([out_size])) 132 epsilon_in = self._f_epsilon(epsilon_in) 133 epsilon_out = self._f_epsilon(epsilon_out) 134 epsilon_w = torch.matmul( 135 torch.unsqueeze(epsilon_in, -1), 136 other=torch.unsqueeze(epsilon_out, 0)) 137 epsilon_b = epsilon_out 138 139 sigma_w = torch.Tensor( 140 data=np.random.uniform( 141 low=-1.0 / np.sqrt(float(in_size)), 142 high=1.0 / np.sqrt(float(in_size)), 143 size=[in_size, out_size]), 144 dtype=torch.float32, 145 requires_grad=True) 146 # TF noise generation can be unreliable on GPU 147 # If generating the noise on the CPU, 148 # lowering sigma0 to 0.1 may be helpful 149 sigma_b = torch.Tensor( 150 data=np.full( 151 shape=[out_size], fill_value=sigma0 / np.sqrt(float(in_size))), 152 requires_grad=True) 153 w = torch.Tensor( 154 data=np.full( 155 shape=[in_size, out_size], 156 fill_value=6 / np.sqrt(float(in_size) + float(out_size))), 157 requires_grad=True) 158 b = torch.Tensor(data=np.zeros([out_size]), requires_grad=True) 159 action_activation = torch.matmul(action_in, w + sigma_w * epsilon_w) \ 160 + b + sigma_b * epsilon_b 161 162 if not non_linear: 163 return action_activation 164 return nn.functional.relu(action_activation) 165 166 def _f_epsilon(self, x): 167 return torch.sign(x) * torch.pow(torch.abs(x), 0.5) 168 [end of rllib/agents/dqn/dqn_torch_model.py] 
[start of rllib/examples/parametric_actions_cartpole.py] 1 """Example of handling variable length and/or parametric action spaces. 2 3 This is a toy example of the action-embedding based approach for handling large 4 discrete action spaces (potentially infinite in size), similar to this: 5 6 https://neuro.cs.ut.ee/the-use-of-embeddings-in-openai-five/ 7 8 This currently works with RLlib's policy gradient style algorithms 9 (e.g., PG, PPO, IMPALA, A2C) and also DQN. 10 11 Note that since the model outputs now include "-inf" tf.float32.min 12 values, not all algorithm options are supported at the moment. For example, 13 algorithms might crash if they don't properly ignore the -inf action scores. 14 Working configurations are given below. 15 """ 16 17 import argparse 18 19 import ray 20 from ray import tune 21 from ray.rllib.examples.env.parametric_actions_cartpole import \ 22 ParametricActionsCartPole 23 from ray.rllib.examples.models.parametric_actions_model import \ 24 ParametricActionsModel, TorchParametricActionsModel 25 from ray.rllib.models import ModelCatalog 26 from ray.rllib.utils.test_utils import check_learning_achieved 27 from ray.tune.registry import register_env 28 29 parser = argparse.ArgumentParser() 30 parser.add_argument("--run", type=str, default="PPO") 31 parser.add_argument("--torch", action="store_true") 32 parser.add_argument("--as-test", action="store_true") 33 parser.add_argument("--stop-iters", type=int, default=200) 34 parser.add_argument("--stop-reward", type=float, default=150.0) 35 parser.add_argument("--stop-timesteps", type=int, default=100000) 36 37 if __name__ == "__main__": 38 args = parser.parse_args() 39 ray.init() 40 41 register_env("pa_cartpole", lambda _: ParametricActionsCartPole(10)) 42 ModelCatalog.register_custom_model( 43 "pa_model", TorchParametricActionsModel 44 if args.torch else ParametricActionsModel) 45 46 if args.run == "DQN": 47 cfg = { 48 # TODO(ekl) we need to set these to prevent the masked values 49 # from being further processed in DistributionalQModel, which 50 # would mess up the masking. It is possible to support these if we 51 # defined a a custom DistributionalQModel that is aware of masking. 52 "hiddens": [], 53 "dueling": False, 54 } 55 else: 56 cfg = {} 57 58 config = dict({ 59 "env": "pa_cartpole", 60 "model": { 61 "custom_model": "pa_model", 62 }, 63 "num_workers": 0, 64 "framework": "torch" if args.torch else "tf", 65 }, **cfg) 66 67 stop = { 68 "training_iteration": args.stop_iters, 69 "timesteps_total": args.stop_timesteps, 70 "episode_reward_mean": args.stop_reward, 71 } 72 73 results = tune.run(args.run, stop=stop, config=config, verbose=1) 74 75 if args.as_test: 76 check_learning_achieved(results, args.stop_reward) 77 78 ray.shutdown() 79 [end of rllib/examples/parametric_actions_cartpole.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rllib/agents/dqn/dqn_torch_model.py b/rllib/agents/dqn/dqn_torch_model.py --- a/rllib/agents/dqn/dqn_torch_model.py +++ b/rllib/agents/dqn/dqn_torch_model.py @@ -53,46 +53,45 @@ self.dueling = dueling ins = num_outputs - # Dueling case: Build the shared (advantages and value) fc-network. advantage_module = nn.Sequential() - value_module = None - if self.dueling: - value_module = nn.Sequential() - for i, n in enumerate(q_hiddens): - advantage_module.add_module("dueling_A_{}".format(i), - nn.Linear(ins, n)) - value_module.add_module("dueling_V_{}".format(i), + value_module = nn.Sequential() + + # Dueling case: Build the shared (advantages and value) fc-network. + for i, n in enumerate(q_hiddens): + advantage_module.add_module("dueling_A_{}".format(i), nn.Linear(ins, n)) - # Add activations if necessary. - if dueling_activation == "relu": - advantage_module.add_module("dueling_A_act_{}".format(i), - nn.ReLU()) - value_module.add_module("dueling_V_act_{}".format(i), + value_module.add_module("dueling_V_{}".format(i), + nn.Linear(ins, n)) + # Add activations if necessary. + if dueling_activation == "relu": + advantage_module.add_module("dueling_A_act_{}".format(i), nn.ReLU()) - elif dueling_activation == "tanh": - advantage_module.add_module("dueling_A_act_{}".format(i), - nn.Tanh()) - value_module.add_module("dueling_V_act_{}".format(i), + value_module.add_module("dueling_V_act_{}".format(i), + nn.ReLU()) + elif dueling_activation == "tanh": + advantage_module.add_module("dueling_A_act_{}".format(i), nn.Tanh()) + value_module.add_module("dueling_V_act_{}".format(i), + nn.Tanh()) - # Add LayerNorm after each Dense. - if add_layer_norm: - advantage_module.add_module("LayerNorm_A_{}".format(i), - nn.LayerNorm(n)) - value_module.add_module("LayerNorm_V_{}".format(i), + # Add LayerNorm after each Dense. + if add_layer_norm: + advantage_module.add_module("LayerNorm_A_{}".format(i), nn.LayerNorm(n)) - ins = n - # Actual Advantages layer (nodes=num-actions) and - # value layer (nodes=1). + value_module.add_module("LayerNorm_V_{}".format(i), + nn.LayerNorm(n)) + ins = n + + # Actual Advantages layer (nodes=num-actions). + if q_hiddens: advantage_module.add_module("A", nn.Linear(ins, action_space.n)) - value_module.add_module("V", nn.Linear(ins, 1)) - # Non-dueling: - # Q-value layer (use main module's outputs as Q-values). - else: - pass self.advantage_module = advantage_module - self.value_module = value_module + + # Value layer (nodes=1). + if self.dueling: + value_module.add_module("V", nn.Linear(ins, 1)) + self.value_module = value_module def get_advantages_or_q_values(self, model_out): """Returns distributional values for Q(s, a) given a state embedding. diff --git a/rllib/examples/parametric_actions_cartpole.py b/rllib/examples/parametric_actions_cartpole.py --- a/rllib/examples/parametric_actions_cartpole.py +++ b/rllib/examples/parametric_actions_cartpole.py @@ -48,7 +48,7 @@ # TODO(ekl) we need to set these to prevent the masked values # from being further processed in DistributionalQModel, which # would mess up the masking. It is possible to support these if we - # defined a a custom DistributionalQModel that is aware of masking. + # defined a custom DistributionalQModel that is aware of masking. "hiddens": [], "dueling": False, }
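A condensed, standalone PyTorch sketch (not RLlib source) of what the patch above changes in the head construction: the advantage/Q branch is built whether or not dueling is enabled and always terminates in a layer sized to the action space, while the single-output value branch is added only in the dueling case. The `build_q_heads` helper and its signature are invented here for illustration; RLlib's actual class wires the equivalent modules into `DQNTorchModel`.

```python
import torch
from torch import nn

def build_q_heads(num_features, num_actions, q_hiddens=(256,), dueling=False):
    advantage, value = nn.Sequential(), nn.Sequential()
    ins = num_features
    for i, n in enumerate(q_hiddens):
        advantage.add_module(f"dueling_A_{i}", nn.Linear(ins, n))
        advantage.add_module(f"dueling_A_act_{i}", nn.ReLU())
        value.add_module(f"dueling_V_{i}", nn.Linear(ins, n))
        value.add_module(f"dueling_V_act_{i}", nn.ReLU())
        ins = n
    if q_hiddens:                      # final Q/advantage layer sized to the action space
        advantage.add_module("A", nn.Linear(ins, num_actions))
    if dueling:                        # state-value head only exists for dueling DQN
        value.add_module("V", nn.Linear(ins, 1))
    return advantage, (value if dueling else None)

features = torch.randn(4, 32)          # e.g. embeddings from "fcnet_hiddens": [32]
adv, _ = build_q_heads(32, num_actions=2, dueling=False)
assert adv(features).shape == (4, 2)   # Q-output now matches the CartPole action space
```

Before the fix, the non-dueling branch skipped this construction entirely, so the raw 32-dimensional feature vector was returned as the "Q-values", which is the mismatch reported in the issue.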
{"golden_diff": "diff --git a/rllib/agents/dqn/dqn_torch_model.py b/rllib/agents/dqn/dqn_torch_model.py\n--- a/rllib/agents/dqn/dqn_torch_model.py\n+++ b/rllib/agents/dqn/dqn_torch_model.py\n@@ -53,46 +53,45 @@\n self.dueling = dueling\n ins = num_outputs\n \n- # Dueling case: Build the shared (advantages and value) fc-network.\n advantage_module = nn.Sequential()\n- value_module = None\n- if self.dueling:\n- value_module = nn.Sequential()\n- for i, n in enumerate(q_hiddens):\n- advantage_module.add_module(\"dueling_A_{}\".format(i),\n- nn.Linear(ins, n))\n- value_module.add_module(\"dueling_V_{}\".format(i),\n+ value_module = nn.Sequential()\n+\n+ # Dueling case: Build the shared (advantages and value) fc-network.\n+ for i, n in enumerate(q_hiddens):\n+ advantage_module.add_module(\"dueling_A_{}\".format(i),\n nn.Linear(ins, n))\n- # Add activations if necessary.\n- if dueling_activation == \"relu\":\n- advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n- nn.ReLU())\n- value_module.add_module(\"dueling_V_act_{}\".format(i),\n+ value_module.add_module(\"dueling_V_{}\".format(i),\n+ nn.Linear(ins, n))\n+ # Add activations if necessary.\n+ if dueling_activation == \"relu\":\n+ advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.ReLU())\n- elif dueling_activation == \"tanh\":\n- advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n- nn.Tanh())\n- value_module.add_module(\"dueling_V_act_{}\".format(i),\n+ value_module.add_module(\"dueling_V_act_{}\".format(i),\n+ nn.ReLU())\n+ elif dueling_activation == \"tanh\":\n+ advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.Tanh())\n+ value_module.add_module(\"dueling_V_act_{}\".format(i),\n+ nn.Tanh())\n \n- # Add LayerNorm after each Dense.\n- if add_layer_norm:\n- advantage_module.add_module(\"LayerNorm_A_{}\".format(i),\n- nn.LayerNorm(n))\n- value_module.add_module(\"LayerNorm_V_{}\".format(i),\n+ # Add LayerNorm after each Dense.\n+ if add_layer_norm:\n+ advantage_module.add_module(\"LayerNorm_A_{}\".format(i),\n nn.LayerNorm(n))\n- ins = n\n- # Actual Advantages layer (nodes=num-actions) and\n- # value layer (nodes=1).\n+ value_module.add_module(\"LayerNorm_V_{}\".format(i),\n+ nn.LayerNorm(n))\n+ ins = n\n+\n+ # Actual Advantages layer (nodes=num-actions).\n+ if q_hiddens:\n advantage_module.add_module(\"A\", nn.Linear(ins, action_space.n))\n- value_module.add_module(\"V\", nn.Linear(ins, 1))\n- # Non-dueling:\n- # Q-value layer (use main module's outputs as Q-values).\n- else:\n- pass\n \n self.advantage_module = advantage_module\n- self.value_module = value_module\n+\n+ # Value layer (nodes=1).\n+ if self.dueling:\n+ value_module.add_module(\"V\", nn.Linear(ins, 1))\n+ self.value_module = value_module\n \n def get_advantages_or_q_values(self, model_out):\n \"\"\"Returns distributional values for Q(s, a) given a state embedding.\ndiff --git a/rllib/examples/parametric_actions_cartpole.py b/rllib/examples/parametric_actions_cartpole.py\n--- a/rllib/examples/parametric_actions_cartpole.py\n+++ b/rllib/examples/parametric_actions_cartpole.py\n@@ -48,7 +48,7 @@\n # TODO(ekl) we need to set these to prevent the masked values\n # from being further processed in DistributionalQModel, which\n # would mess up the masking. 
It is possible to support these if we\n- # defined a a custom DistributionalQModel that is aware of masking.\n+ # defined a custom DistributionalQModel that is aware of masking.\n \"hiddens\": [],\n \"dueling\": False,\n }\n", "issue": "[rllib] incorrect model output for DQN with torch and dueling=false \n<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->\r\n\r\n### What is the problem?\r\n\r\nThe output fo the DQN model is not within the action space. \r\n\r\nSomething is wrong when constructing the torch model when dueling is off. The output dimension of the model is equal to whatever is passed in \"fcnet_hiddens\" instead of being of the size of the action space. \r\n\r\n*Ray version and other system information (Python version, TensorFlow version, OS):*\r\n- ray==0.9.0.dev0 \r\n- python 3.6.10\r\n- mac OS\r\n\r\n### Reproduction (REQUIRED)\r\n\r\n```python\r\nimport ray\r\nfrom ray import tune\r\n\r\nray.init()\r\n\r\nconfig = {\r\n \"env\": \"CartPole-v1\",\r\n \"num_workers\": 1,\r\n \"train_batch_size\": 128,\r\n \"learning_starts\": 128,\r\n \"model\": {\"fcnet_hiddens\": [32]},\r\n \"dueling\": False ,\r\n \"framework\": \"torch\"\r\n}\r\n\r\ntune.run(\"DQN\", name=\"MWE\", config=config, stop={\"training_iteration\": 100})\r\n```\r\n\r\n- [x] I have verified my script runs in a clean environment and reproduces the issue.\r\n- [x] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).\r\n\n", "before_files": [{"content": "import numpy as np\n\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\nfrom ray.rllib.utils.framework import try_import_torch\n\ntorch, nn = try_import_torch()\n\n\nclass DQNTorchModel(TorchModelV2, nn.Module):\n \"\"\"Extension of standard TorchModelV2 to provide dueling-Q functionality.\n \"\"\"\n\n def __init__(\n self,\n obs_space,\n action_space,\n num_outputs,\n model_config,\n name,\n *,\n dueling=False,\n q_hiddens=(256, ),\n dueling_activation=\"relu\",\n use_noisy=False,\n sigma0=0.5,\n # TODO(sven): Move `add_layer_norm` into ModelCatalog as\n # generic option, then error if we use ParameterNoise as\n # Exploration type and do not have any LayerNorm layers in\n # the net.\n add_layer_norm=False):\n \"\"\"Initialize variables of this model.\n\n Extra model kwargs:\n dueling (bool): Whether to build the advantage(A)/value(V) heads\n for DDQN. If True, Q-values are calculated as:\n Q = (A - mean[A]) + V. If False, raw NN output is interpreted\n as Q-values.\n q_hiddens (List[int]): List of layer-sizes after(!) the\n Advantages(A)/Value(V)-split. Hence, each of the A- and V-\n branches will have this structure of Dense layers. To define\n the NN before this A/V-split, use - as always -\n config[\"model\"][\"fcnet_hiddens\"].\n dueling_activation (str): The activation to use for all dueling\n layers (A- and V-branch). 
One of \"relu\", \"tanh\", \"linear\".\n use_noisy (bool): use noisy nets\n sigma0 (float): initial value of noisy nets\n add_layer_norm (bool): Enable layer norm (for param noise).\n \"\"\"\n nn.Module.__init__(self)\n super(DQNTorchModel, self).__init__(obs_space, action_space,\n num_outputs, model_config, name)\n\n self.dueling = dueling\n ins = num_outputs\n\n # Dueling case: Build the shared (advantages and value) fc-network.\n advantage_module = nn.Sequential()\n value_module = None\n if self.dueling:\n value_module = nn.Sequential()\n for i, n in enumerate(q_hiddens):\n advantage_module.add_module(\"dueling_A_{}\".format(i),\n nn.Linear(ins, n))\n value_module.add_module(\"dueling_V_{}\".format(i),\n nn.Linear(ins, n))\n # Add activations if necessary.\n if dueling_activation == \"relu\":\n advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.ReLU())\n value_module.add_module(\"dueling_V_act_{}\".format(i),\n nn.ReLU())\n elif dueling_activation == \"tanh\":\n advantage_module.add_module(\"dueling_A_act_{}\".format(i),\n nn.Tanh())\n value_module.add_module(\"dueling_V_act_{}\".format(i),\n nn.Tanh())\n\n # Add LayerNorm after each Dense.\n if add_layer_norm:\n advantage_module.add_module(\"LayerNorm_A_{}\".format(i),\n nn.LayerNorm(n))\n value_module.add_module(\"LayerNorm_V_{}\".format(i),\n nn.LayerNorm(n))\n ins = n\n # Actual Advantages layer (nodes=num-actions) and\n # value layer (nodes=1).\n advantage_module.add_module(\"A\", nn.Linear(ins, action_space.n))\n value_module.add_module(\"V\", nn.Linear(ins, 1))\n # Non-dueling:\n # Q-value layer (use main module's outputs as Q-values).\n else:\n pass\n\n self.advantage_module = advantage_module\n self.value_module = value_module\n\n def get_advantages_or_q_values(self, model_out):\n \"\"\"Returns distributional values for Q(s, a) given a state embedding.\n\n Override this in your custom model to customize the Q output head.\n\n Arguments:\n model_out (Tensor): embedding from the model layers\n\n Returns:\n (action_scores, logits, dist) if num_atoms == 1, otherwise\n (action_scores, z, support_logits_per_action, logits, dist)\n \"\"\"\n\n return self.advantage_module(model_out)\n\n def get_state_value(self, model_out):\n \"\"\"Returns the state value prediction for the given state embedding.\"\"\"\n\n return self.value_module(model_out)\n\n def _noisy_layer(self, action_in, out_size, sigma0, non_linear=True):\n \"\"\"\n a common dense layer: y = w^{T}x + b\n a noisy layer: y = (w + \\\\epsilon_w*\\\\sigma_w)^{T}x +\n (b+\\\\epsilon_b*\\\\sigma_b)\n where \\epsilon are random variables sampled from factorized normal\n distributions and \\\\sigma are trainable variables which are expected to\n vanish along the training procedure\n \"\"\"\n in_size = int(action_in.shape[1])\n\n epsilon_in = torch.normal(\n mean=torch.zeros([in_size]), std=torch.ones([in_size]))\n epsilon_out = torch.normal(\n mean=torch.zeros([out_size]), std=torch.ones([out_size]))\n epsilon_in = self._f_epsilon(epsilon_in)\n epsilon_out = self._f_epsilon(epsilon_out)\n epsilon_w = torch.matmul(\n torch.unsqueeze(epsilon_in, -1),\n other=torch.unsqueeze(epsilon_out, 0))\n epsilon_b = epsilon_out\n\n sigma_w = torch.Tensor(\n data=np.random.uniform(\n low=-1.0 / np.sqrt(float(in_size)),\n high=1.0 / np.sqrt(float(in_size)),\n size=[in_size, out_size]),\n dtype=torch.float32,\n requires_grad=True)\n # TF noise generation can be unreliable on GPU\n # If generating the noise on the CPU,\n # lowering sigma0 to 0.1 may be helpful\n sigma_b = torch.Tensor(\n 
data=np.full(\n shape=[out_size], fill_value=sigma0 / np.sqrt(float(in_size))),\n requires_grad=True)\n w = torch.Tensor(\n data=np.full(\n shape=[in_size, out_size],\n fill_value=6 / np.sqrt(float(in_size) + float(out_size))),\n requires_grad=True)\n b = torch.Tensor(data=np.zeros([out_size]), requires_grad=True)\n action_activation = torch.matmul(action_in, w + sigma_w * epsilon_w) \\\n + b + sigma_b * epsilon_b\n\n if not non_linear:\n return action_activation\n return nn.functional.relu(action_activation)\n\n def _f_epsilon(self, x):\n return torch.sign(x) * torch.pow(torch.abs(x), 0.5)\n", "path": "rllib/agents/dqn/dqn_torch_model.py"}, {"content": "\"\"\"Example of handling variable length and/or parametric action spaces.\n\nThis is a toy example of the action-embedding based approach for handling large\ndiscrete action spaces (potentially infinite in size), similar to this:\n\n https://neuro.cs.ut.ee/the-use-of-embeddings-in-openai-five/\n\nThis currently works with RLlib's policy gradient style algorithms\n(e.g., PG, PPO, IMPALA, A2C) and also DQN.\n\nNote that since the model outputs now include \"-inf\" tf.float32.min\nvalues, not all algorithm options are supported at the moment. For example,\nalgorithms might crash if they don't properly ignore the -inf action scores.\nWorking configurations are given below.\n\"\"\"\n\nimport argparse\n\nimport ray\nfrom ray import tune\nfrom ray.rllib.examples.env.parametric_actions_cartpole import \\\n ParametricActionsCartPole\nfrom ray.rllib.examples.models.parametric_actions_model import \\\n ParametricActionsModel, TorchParametricActionsModel\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.utils.test_utils import check_learning_achieved\nfrom ray.tune.registry import register_env\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--run\", type=str, default=\"PPO\")\nparser.add_argument(\"--torch\", action=\"store_true\")\nparser.add_argument(\"--as-test\", action=\"store_true\")\nparser.add_argument(\"--stop-iters\", type=int, default=200)\nparser.add_argument(\"--stop-reward\", type=float, default=150.0)\nparser.add_argument(\"--stop-timesteps\", type=int, default=100000)\n\nif __name__ == \"__main__\":\n args = parser.parse_args()\n ray.init()\n\n register_env(\"pa_cartpole\", lambda _: ParametricActionsCartPole(10))\n ModelCatalog.register_custom_model(\n \"pa_model\", TorchParametricActionsModel\n if args.torch else ParametricActionsModel)\n\n if args.run == \"DQN\":\n cfg = {\n # TODO(ekl) we need to set these to prevent the masked values\n # from being further processed in DistributionalQModel, which\n # would mess up the masking. It is possible to support these if we\n # defined a a custom DistributionalQModel that is aware of masking.\n \"hiddens\": [],\n \"dueling\": False,\n }\n else:\n cfg = {}\n\n config = dict({\n \"env\": \"pa_cartpole\",\n \"model\": {\n \"custom_model\": \"pa_model\",\n },\n \"num_workers\": 0,\n \"framework\": \"torch\" if args.torch else \"tf\",\n }, **cfg)\n\n stop = {\n \"training_iteration\": args.stop_iters,\n \"timesteps_total\": args.stop_timesteps,\n \"episode_reward_mean\": args.stop_reward,\n }\n\n results = tune.run(args.run, stop=stop, config=config, verbose=1)\n\n if args.as_test:\n check_learning_achieved(results, args.stop_reward)\n\n ray.shutdown()\n", "path": "rllib/examples/parametric_actions_cartpole.py"}]}
3,581
978
gh_patches_debug_35288
rasdani/github-patches
git_diff
optuna__optuna-4151
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> JournalStorage not working on Windows10 ### Expected behavior I was using the minimal JournalStorage example from the optuna documentation on my Windows10 PC. However the script is not able to run and throws every time json decoding errors. According to the latest documentation I didn´t see any limitations regarding the OS on which JournalStorage can be used. But maybe I was missing something. ### Environment - Optuna version:3.1.0.dev - Python version:3.10.6 - OS:Windows-10-10.0.19044-SP0 - (Optional) Other libraries and their versions: ### Error messages, stack traces, or logs ```shell C:\1_Files\M\optuna_trial.py:9: ExperimentalWarning: JournalStorage is experimental (supported from v3.1.0). The interface can change in the future. storage = optuna.storages.JournalStorage( Traceback (most recent call last): File "C:\1_Files\M\optuna_trial.py", line 12, in <module> study = optuna.create_study(storage=storage) File "C:\Users\Prometheus\miniconda3\envs\py310\lib\site-packages\optuna\_convert_positional_args.py", line 63, in converter_wrapper return func(**kwargs) File "C:\Users\Prometheus\miniconda3\envs\py310\lib\site-packages\optuna\study\study.py", line 1155, in create_study study_id = storage.create_new_study(study_name) File "C:\Users\Prometheus\miniconda3\envs\py310\lib\site-packages\optuna\storages\_journal\storage.py", line 111, in create_new_study self._sync_with_backend() File "C:\Users\Prometheus\miniconda3\envs\py310\lib\site-packages\optuna\storages\_journal\storage.py", line 103, in _sync_with_backend logs = self._backend.read_logs(self._replay_result.log_number_read) File "C:\Users\Prometheus\miniconda3\envs\py310\lib\site-packages\optuna\storages\_journal\file.py", line 174, in read_logs raise last_decode_error File "C:\Users\Prometheus\miniconda3\envs\py310\lib\site-packages\optuna\storages\_journal\file.py", line 184, in read_logs logs.append(json.loads(line)) File "C:\Users\Prometheus\miniconda3\envs\py310\lib\json\__init__.py", line 346, in loads return _default_decoder.decode(s) File "C:\Users\Prometheus\miniconda3\envs\py310\lib\json\decoder.py", line 337, in decode obj, end = self.raw_decode(s, idx=_w(s, 0).end()) File "C:\Users\Prometheus\miniconda3\envs\py310\lib\json\decoder.py", line 353, in raw_decode obj, end = self.scan_once(s, idx) json.decoder.JSONDecodeError: Invalid control character at: line 1 column 3 (char 2) ``` ### Steps to reproduce 1. Run any JournalStorage example on a Windows10 PC 2. Python finishes with error 3. 
```python import optuna def objective(trial): x = trial.suggest_float("x", -10, 10) return (x - 2) ** 2 storage = optuna.storages.JournalStorage( optuna.storages.JournalFileStorage("./log_file")) study = optuna.create_study(storage=storage) study.optimize(objective) ``` ### Additional context (optional) running the script as an admin yields a slightly different error ``` File "C:\Users\Prometheus\miniconda3\envs\py310\lib\json\decoder.py", line 355, in raw_decode raise JSONDecodeError("Expecting value", s, err.value) from None json.decoder.JSONDecodeError: Expecting value: line 2 column 1 (char 1) ``` </issue> <code> [start of optuna/storages/_journal/file.py] 1 import abc 2 from contextlib import contextmanager 3 import errno 4 import json 5 import os 6 import time 7 from typing import Any 8 from typing import Dict 9 from typing import Iterator 10 from typing import List 11 from typing import Optional 12 import uuid 13 14 from optuna.storages._journal.base import BaseJournalLogStorage 15 16 17 LOCK_FILE_SUFFIX = ".lock" 18 RENAME_FILE_SUFFIX = ".rename" 19 20 21 class JournalFileBaseLock(abc.ABC): 22 @abc.abstractmethod 23 def acquire(self) -> bool: 24 raise NotImplementedError 25 26 @abc.abstractmethod 27 def release(self) -> None: 28 raise NotImplementedError 29 30 31 class JournalFileSymlinkLock(JournalFileBaseLock): 32 """Lock class for synchronizing processes for NFSv2 or later. 33 34 On acquiring the lock, link system call is called to create an exclusive file. The file is 35 deleted when the lock is released. In NFS environments prior to NFSv3, use this instead of 36 :class:`~optuna.storages.JournalFileOpenLock` 37 38 Args: 39 filepath: 40 The path of the file whose race condition must be protected. 41 """ 42 43 def __init__(self, filepath: str) -> None: 44 self._lock_target_file = filepath 45 self._lock_file = filepath + LOCK_FILE_SUFFIX 46 self._lock_rename_file = self._lock_file + str(uuid.uuid4()) + RENAME_FILE_SUFFIX 47 48 def acquire(self) -> bool: 49 """Acquire a lock in a blocking way by creating a symbolic link of a file. 50 51 Returns: 52 :obj:`True` if it succeeded in creating a symbolic link of `self._lock_target_file`. 53 54 """ 55 sleep_secs = 0.001 56 while True: 57 try: 58 os.symlink(self._lock_target_file, self._lock_file) 59 return True 60 except OSError as err: 61 if err.errno == errno.EEXIST: 62 time.sleep(sleep_secs) 63 sleep_secs = min(sleep_secs * 2, 1) 64 continue 65 raise err 66 except BaseException: 67 self.release() 68 raise 69 70 def release(self) -> None: 71 """Release a lock by removing the symbolic link.""" 72 73 try: 74 os.rename(self._lock_file, self._lock_rename_file) 75 os.unlink(self._lock_rename_file) 76 except OSError: 77 raise RuntimeError("Error: did not possess lock") 78 except BaseException: 79 os.unlink(self._lock_rename_file) 80 raise 81 82 83 class JournalFileOpenLock(JournalFileBaseLock): 84 """Lock class for synchronizing processes for NFSv3 or later. 85 86 On acquiring the lock, open system call is called with the O_EXCL option to create an exclusive 87 file. The file is deleted when the lock is released. This class is only supported when using 88 NFSv3 or later on kernel 2.6 or later. In prior NFS environments, use 89 :class:`~optuna.storages.JournalFileSymlinkLock`. 90 91 Args: 92 filepath: 93 The path of the file whose race condition must be protected. 
94 """ 95 96 def __init__(self, filepath: str) -> None: 97 self._lock_file = filepath + LOCK_FILE_SUFFIX 98 self._lock_rename_file = self._lock_file + str(uuid.uuid4()) + RENAME_FILE_SUFFIX 99 100 def acquire(self) -> bool: 101 """Acquire a lock in a blocking way by creating a lock file. 102 103 Returns: 104 :obj:`True` if it succeeded in creating a `self._lock_file` 105 106 """ 107 sleep_secs = 0.001 108 while True: 109 try: 110 open_flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY 111 os.close(os.open(self._lock_file, open_flags)) 112 return True 113 except OSError as err: 114 if err.errno == errno.EEXIST: 115 time.sleep(sleep_secs) 116 sleep_secs = min(sleep_secs * 2, 1) 117 continue 118 raise err 119 except BaseException: 120 self.release() 121 raise 122 123 def release(self) -> None: 124 """Release a lock by removing the created file.""" 125 126 try: 127 os.rename(self._lock_file, self._lock_rename_file) 128 os.unlink(self._lock_rename_file) 129 except OSError: 130 raise RuntimeError("Error: did not possess lock") 131 except BaseException: 132 os.unlink(self._lock_rename_file) 133 raise 134 135 136 @contextmanager 137 def get_lock_file(lock_obj: JournalFileBaseLock) -> Iterator[None]: 138 lock_obj.acquire() 139 try: 140 yield 141 finally: 142 lock_obj.release() 143 144 145 class JournalFileStorage(BaseJournalLogStorage): 146 """File storage class for Journal log backend. 147 148 Args: 149 file_path: 150 Path of file to persist the log to. 151 152 lock_obj: 153 Lock object for process exclusivity. 154 155 """ 156 157 def __init__(self, file_path: str, lock_obj: Optional[JournalFileBaseLock] = None) -> None: 158 self._file_path: str = file_path 159 self._lock = lock_obj or JournalFileSymlinkLock(self._file_path) 160 open(self._file_path, "a").close() # Create a file if it does not exist 161 self._log_number_offset: Dict[int, int] = {0: 0} 162 163 def read_logs(self, log_number_from: int) -> List[Dict[str, Any]]: 164 logs = [] 165 with open(self._file_path, "r") as f: 166 log_number_start = 0 167 if log_number_from in self._log_number_offset: 168 f.seek(self._log_number_offset[log_number_from]) 169 log_number_start = log_number_from 170 171 last_decode_error = None 172 for log_number, line in enumerate(f, start=log_number_start): 173 if last_decode_error is not None: 174 raise last_decode_error 175 if log_number + 1 not in self._log_number_offset: 176 byte_len = len(line.encode("utf-8")) 177 self._log_number_offset[log_number + 1] = ( 178 self._log_number_offset[log_number] + byte_len 179 ) 180 if log_number < log_number_from: 181 continue 182 try: 183 logs.append(json.loads(line)) 184 except json.JSONDecodeError as err: 185 last_decode_error = err 186 del self._log_number_offset[log_number + 1] 187 188 return logs 189 190 def append_logs(self, logs: List[Dict[str, Any]]) -> None: 191 with get_lock_file(self._lock): 192 what_to_write = "\n".join([json.dumps(log) for log in logs]) + "\n" 193 with open(self._file_path, "a", encoding="utf-8") as f: 194 f.write(what_to_write) 195 f.flush() 196 os.fsync(f.fileno()) 197 [end of optuna/storages/_journal/file.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/optuna/storages/_journal/file.py b/optuna/storages/_journal/file.py --- a/optuna/storages/_journal/file.py +++ b/optuna/storages/_journal/file.py @@ -157,12 +157,12 @@ def __init__(self, file_path: str, lock_obj: Optional[JournalFileBaseLock] = None) -> None: self._file_path: str = file_path self._lock = lock_obj or JournalFileSymlinkLock(self._file_path) - open(self._file_path, "a").close() # Create a file if it does not exist + open(self._file_path, "ab").close() # Create a file if it does not exist self._log_number_offset: Dict[int, int] = {0: 0} def read_logs(self, log_number_from: int) -> List[Dict[str, Any]]: logs = [] - with open(self._file_path, "r") as f: + with open(self._file_path, "rb") as f: log_number_start = 0 if log_number_from in self._log_number_offset: f.seek(self._log_number_offset[log_number_from]) @@ -173,7 +173,7 @@ if last_decode_error is not None: raise last_decode_error if log_number + 1 not in self._log_number_offset: - byte_len = len(line.encode("utf-8")) + byte_len = len(line) self._log_number_offset[log_number + 1] = ( self._log_number_offset[log_number] + byte_len ) @@ -190,7 +190,7 @@ def append_logs(self, logs: List[Dict[str, Any]]) -> None: with get_lock_file(self._lock): what_to_write = "\n".join([json.dumps(log) for log in logs]) + "\n" - with open(self._file_path, "a", encoding="utf-8") as f: - f.write(what_to_write) + with open(self._file_path, "ab") as f: + f.write(what_to_write.encode("utf-8")) f.flush() os.fsync(f.fileno())
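A self-contained sketch, separate from the optuna classes, of the core of the fix above: the journal file is appended and read in binary mode, so the byte offsets cached for `seek()` stay correct on Windows, where text mode translates line endings. The file name and the trimmed-down helpers below are made up for the demo; the locking and error handling of the real `JournalFileStorage` are omitted.

```python
import json
import os

path = "journal_demo.log"     # throwaway demo file, not optuna's naming scheme
offsets = {0: 0}              # log number -> byte offset, as in JournalFileStorage

def append_logs(logs):
    payload = ("\n".join(json.dumps(log) for log in logs) + "\n").encode("utf-8")
    with open(path, "ab") as f:          # binary append: bytes land on disk unmodified
        f.write(payload)
        f.flush()
        os.fsync(f.fileno())

def read_logs(from_number):
    out = []
    with open(path, "rb") as f:          # binary read: len(line) is a true byte count
        f.seek(offsets[from_number])
        for number, line in enumerate(f, start=from_number):
            offsets[number + 1] = offsets[number] + len(line)
            out.append(json.loads(line))
    return out

open(path, "ab").close()
append_logs([{"op_code": 0, "study_name": "demo"}])
print(read_logs(0))
os.remove(path)
```

With text-mode writes on Windows, each `\n` becomes `\r\n` on disk, so offsets computed from the decoded text no longer match the file and a later `seek()` lands mid-record, which is what produced the `JSONDecodeError` in the report.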
{"golden_diff": "diff --git a/optuna/storages/_journal/file.py b/optuna/storages/_journal/file.py\n--- a/optuna/storages/_journal/file.py\n+++ b/optuna/storages/_journal/file.py\n@@ -157,12 +157,12 @@\n def __init__(self, file_path: str, lock_obj: Optional[JournalFileBaseLock] = None) -> None:\n self._file_path: str = file_path\n self._lock = lock_obj or JournalFileSymlinkLock(self._file_path)\n- open(self._file_path, \"a\").close() # Create a file if it does not exist\n+ open(self._file_path, \"ab\").close() # Create a file if it does not exist\n self._log_number_offset: Dict[int, int] = {0: 0}\n \n def read_logs(self, log_number_from: int) -> List[Dict[str, Any]]:\n logs = []\n- with open(self._file_path, \"r\") as f:\n+ with open(self._file_path, \"rb\") as f:\n log_number_start = 0\n if log_number_from in self._log_number_offset:\n f.seek(self._log_number_offset[log_number_from])\n@@ -173,7 +173,7 @@\n if last_decode_error is not None:\n raise last_decode_error\n if log_number + 1 not in self._log_number_offset:\n- byte_len = len(line.encode(\"utf-8\"))\n+ byte_len = len(line)\n self._log_number_offset[log_number + 1] = (\n self._log_number_offset[log_number] + byte_len\n )\n@@ -190,7 +190,7 @@\n def append_logs(self, logs: List[Dict[str, Any]]) -> None:\n with get_lock_file(self._lock):\n what_to_write = \"\\n\".join([json.dumps(log) for log in logs]) + \"\\n\"\n- with open(self._file_path, \"a\", encoding=\"utf-8\") as f:\n- f.write(what_to_write)\n+ with open(self._file_path, \"ab\") as f:\n+ f.write(what_to_write.encode(\"utf-8\"))\n f.flush()\n os.fsync(f.fileno())\n", "issue": "JournalStorage not working on Windows10\n### Expected behavior\r\n\r\nI was using the minimal JournalStorage example from the optuna documentation on my Windows10 PC. \r\nHowever the script is not able to run and throws every time json decoding errors.\r\n\r\nAccording to the latest documentation I didn\u00b4t see any limitations regarding the OS on which JournalStorage can be used. But maybe I was missing something.\r\n\r\n\r\n### Environment\r\n\r\n- Optuna version:3.1.0.dev\r\n- Python version:3.10.6\r\n- OS:Windows-10-10.0.19044-SP0\r\n- (Optional) Other libraries and their versions:\r\n\r\n\r\n### Error messages, stack traces, or logs\r\n\r\n```shell\r\nC:\\1_Files\\M\\optuna_trial.py:9: ExperimentalWarning: JournalStorage is experimental (supported from v3.1.0). 
The interface can change in the future.\r\n storage = optuna.storages.JournalStorage(\r\nTraceback (most recent call last):\r\n File \"C:\\1_Files\\M\\optuna_trial.py\", line 12, in <module>\r\n study = optuna.create_study(storage=storage)\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\site-packages\\optuna\\_convert_positional_args.py\", line 63, in converter_wrapper\r\n return func(**kwargs)\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\site-packages\\optuna\\study\\study.py\", line 1155, in create_study\r\n study_id = storage.create_new_study(study_name)\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\site-packages\\optuna\\storages\\_journal\\storage.py\", line 111, in create_new_study\r\n self._sync_with_backend()\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\site-packages\\optuna\\storages\\_journal\\storage.py\", line 103, in _sync_with_backend\r\n logs = self._backend.read_logs(self._replay_result.log_number_read)\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\site-packages\\optuna\\storages\\_journal\\file.py\", line 174, in read_logs\r\n raise last_decode_error\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\site-packages\\optuna\\storages\\_journal\\file.py\", line 184, in read_logs\r\n logs.append(json.loads(line))\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\json\\__init__.py\", line 346, in loads\r\n return _default_decoder.decode(s)\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\json\\decoder.py\", line 337, in decode\r\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\json\\decoder.py\", line 353, in raw_decode\r\n obj, end = self.scan_once(s, idx)\r\njson.decoder.JSONDecodeError: Invalid control character at: line 1 column 3 (char 2)\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\n1. Run any JournalStorage example on a Windows10 PC\r\n2. 
Python finishes with error\r\n3.\r\n```python\r\nimport optuna\r\n\r\n\r\ndef objective(trial):\r\n x = trial.suggest_float(\"x\", -10, 10)\r\n return (x - 2) ** 2\r\n\r\n\r\nstorage = optuna.storages.JournalStorage(\r\n optuna.storages.JournalFileStorage(\"./log_file\"))\r\n\r\nstudy = optuna.create_study(storage=storage)\r\nstudy.optimize(objective)\r\n```\r\n\r\n\r\n### Additional context (optional)\r\n\r\nrunning the script as an admin yields a slightly different error\r\n\r\n```\r\n File \"C:\\Users\\Prometheus\\miniconda3\\envs\\py310\\lib\\json\\decoder.py\", line 355, in raw_decode\r\n raise JSONDecodeError(\"Expecting value\", s, err.value) from None\r\njson.decoder.JSONDecodeError: Expecting value: line 2 column 1 (char 1)\r\n```\n", "before_files": [{"content": "import abc\nfrom contextlib import contextmanager\nimport errno\nimport json\nimport os\nimport time\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nimport uuid\n\nfrom optuna.storages._journal.base import BaseJournalLogStorage\n\n\nLOCK_FILE_SUFFIX = \".lock\"\nRENAME_FILE_SUFFIX = \".rename\"\n\n\nclass JournalFileBaseLock(abc.ABC):\n @abc.abstractmethod\n def acquire(self) -> bool:\n raise NotImplementedError\n\n @abc.abstractmethod\n def release(self) -> None:\n raise NotImplementedError\n\n\nclass JournalFileSymlinkLock(JournalFileBaseLock):\n \"\"\"Lock class for synchronizing processes for NFSv2 or later.\n\n On acquiring the lock, link system call is called to create an exclusive file. The file is\n deleted when the lock is released. In NFS environments prior to NFSv3, use this instead of\n :class:`~optuna.storages.JournalFileOpenLock`\n\n Args:\n filepath:\n The path of the file whose race condition must be protected.\n \"\"\"\n\n def __init__(self, filepath: str) -> None:\n self._lock_target_file = filepath\n self._lock_file = filepath + LOCK_FILE_SUFFIX\n self._lock_rename_file = self._lock_file + str(uuid.uuid4()) + RENAME_FILE_SUFFIX\n\n def acquire(self) -> bool:\n \"\"\"Acquire a lock in a blocking way by creating a symbolic link of a file.\n\n Returns:\n :obj:`True` if it succeeded in creating a symbolic link of `self._lock_target_file`.\n\n \"\"\"\n sleep_secs = 0.001\n while True:\n try:\n os.symlink(self._lock_target_file, self._lock_file)\n return True\n except OSError as err:\n if err.errno == errno.EEXIST:\n time.sleep(sleep_secs)\n sleep_secs = min(sleep_secs * 2, 1)\n continue\n raise err\n except BaseException:\n self.release()\n raise\n\n def release(self) -> None:\n \"\"\"Release a lock by removing the symbolic link.\"\"\"\n\n try:\n os.rename(self._lock_file, self._lock_rename_file)\n os.unlink(self._lock_rename_file)\n except OSError:\n raise RuntimeError(\"Error: did not possess lock\")\n except BaseException:\n os.unlink(self._lock_rename_file)\n raise\n\n\nclass JournalFileOpenLock(JournalFileBaseLock):\n \"\"\"Lock class for synchronizing processes for NFSv3 or later.\n\n On acquiring the lock, open system call is called with the O_EXCL option to create an exclusive\n file. The file is deleted when the lock is released. This class is only supported when using\n NFSv3 or later on kernel 2.6 or later. 
In prior NFS environments, use\n :class:`~optuna.storages.JournalFileSymlinkLock`.\n\n Args:\n filepath:\n The path of the file whose race condition must be protected.\n \"\"\"\n\n def __init__(self, filepath: str) -> None:\n self._lock_file = filepath + LOCK_FILE_SUFFIX\n self._lock_rename_file = self._lock_file + str(uuid.uuid4()) + RENAME_FILE_SUFFIX\n\n def acquire(self) -> bool:\n \"\"\"Acquire a lock in a blocking way by creating a lock file.\n\n Returns:\n :obj:`True` if it succeeded in creating a `self._lock_file`\n\n \"\"\"\n sleep_secs = 0.001\n while True:\n try:\n open_flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY\n os.close(os.open(self._lock_file, open_flags))\n return True\n except OSError as err:\n if err.errno == errno.EEXIST:\n time.sleep(sleep_secs)\n sleep_secs = min(sleep_secs * 2, 1)\n continue\n raise err\n except BaseException:\n self.release()\n raise\n\n def release(self) -> None:\n \"\"\"Release a lock by removing the created file.\"\"\"\n\n try:\n os.rename(self._lock_file, self._lock_rename_file)\n os.unlink(self._lock_rename_file)\n except OSError:\n raise RuntimeError(\"Error: did not possess lock\")\n except BaseException:\n os.unlink(self._lock_rename_file)\n raise\n\n\n@contextmanager\ndef get_lock_file(lock_obj: JournalFileBaseLock) -> Iterator[None]:\n lock_obj.acquire()\n try:\n yield\n finally:\n lock_obj.release()\n\n\nclass JournalFileStorage(BaseJournalLogStorage):\n \"\"\"File storage class for Journal log backend.\n\n Args:\n file_path:\n Path of file to persist the log to.\n\n lock_obj:\n Lock object for process exclusivity.\n\n \"\"\"\n\n def __init__(self, file_path: str, lock_obj: Optional[JournalFileBaseLock] = None) -> None:\n self._file_path: str = file_path\n self._lock = lock_obj or JournalFileSymlinkLock(self._file_path)\n open(self._file_path, \"a\").close() # Create a file if it does not exist\n self._log_number_offset: Dict[int, int] = {0: 0}\n\n def read_logs(self, log_number_from: int) -> List[Dict[str, Any]]:\n logs = []\n with open(self._file_path, \"r\") as f:\n log_number_start = 0\n if log_number_from in self._log_number_offset:\n f.seek(self._log_number_offset[log_number_from])\n log_number_start = log_number_from\n\n last_decode_error = None\n for log_number, line in enumerate(f, start=log_number_start):\n if last_decode_error is not None:\n raise last_decode_error\n if log_number + 1 not in self._log_number_offset:\n byte_len = len(line.encode(\"utf-8\"))\n self._log_number_offset[log_number + 1] = (\n self._log_number_offset[log_number] + byte_len\n )\n if log_number < log_number_from:\n continue\n try:\n logs.append(json.loads(line))\n except json.JSONDecodeError as err:\n last_decode_error = err\n del self._log_number_offset[log_number + 1]\n\n return logs\n\n def append_logs(self, logs: List[Dict[str, Any]]) -> None:\n with get_lock_file(self._lock):\n what_to_write = \"\\n\".join([json.dumps(log) for log in logs]) + \"\\n\"\n with open(self._file_path, \"a\", encoding=\"utf-8\") as f:\n f.write(what_to_write)\n f.flush()\n os.fsync(f.fileno())\n", "path": "optuna/storages/_journal/file.py"}]}
3,490
507
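For the JournalStorage failure in the record above, the traceback points at `JournalFileSymlinkLock`, whose `os.symlink` call typically needs elevated privileges on Windows. The repository's actual fix is not shown in this record, but a common user-side workaround is to pass the `open()`-based lock explicitly, which the constructor shown in `before_files` already supports via its `lock_obj` parameter. The sketch below assumes the lock classes are exported from `optuna.storages` as their docstrings indicate, and the log-file path is illustrative; whether this alone avoids the reported `JSONDecodeError` depends on the underlying cause.

```python
import optuna
from optuna.storages import JournalFileOpenLock, JournalFileStorage, JournalStorage

log_file = "./journal.log"  # illustrative path
storage = JournalStorage(
    # JournalFileOpenLock creates the lock file with O_CREAT | O_EXCL instead of
    # a symlink, so it does not require symlink privileges on Windows.
    JournalFileStorage(log_file, lock_obj=JournalFileOpenLock(log_file))
)
study = optuna.create_study(storage=storage)
study.optimize(lambda t: (t.suggest_float("x", -10, 10) - 2) ** 2, n_trials=5)
```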
gh_patches_debug_28558
rasdani/github-patches
git_diff
encode__uvicorn-420
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update 'websockets' to 8.x I realize that https://github.com/encode/uvicorn/pull/396 was done because the tests appear to be failing with `websockets>=8.0`, but that release also fixed a bunch of DeprecationWarnings that we've been seeing in [httpx builds on 3.8](https://travis-ci.org/encode/httpx/jobs/574949180). Since it's only a `DeprecationWarning`, and only on 3.8 ([3.7 and below is fine](https://travis-ci.org/encode/httpx/jobs/574949179)), I definitely would agree that this is a low priority issue. I dug into the tests to try and figure out what exactly was going wrong, but the `AttributeError`s I looked at looks like the main issue is two missing functions in [websockets_impl.py](https://github.com/encode/uvicorn/blob/master/uvicorn/protocols/websockets/websockets_impl.py) `15 AttributeError: 'Server' object has no attribute 'is_serving'` --> https://github.com/aaugustin/websockets/blob/8.0.2/src/websockets/server.py#L642-L668 ` 9 AttributeError: 'WebSocketProtocol' object has no attribute 'transfer_data_task'` --> https://github.com/aaugustin/websockets/blob/8.0.2/src/websockets/protocol.py#L276-L313 </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 import os 5 import re 6 import sys 7 import platform 8 9 from setuptools import setup 10 11 12 def get_version(package): 13 """ 14 Return package version as listed in `__version__` in `init.py`. 15 """ 16 path = os.path.join(package, '__init__.py') 17 init_py = open(path, 'r', encoding='utf8').read() 18 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1) 19 20 21 def get_long_description(): 22 """ 23 Return the README. 24 """ 25 return open('README.md', 'r', encoding='utf8').read() 26 27 28 def get_packages(package): 29 """ 30 Return root package and all sub-packages. 
31 """ 32 return [dirpath 33 for dirpath, dirnames, filenames in os.walk(package) 34 if os.path.exists(os.path.join(dirpath, '__init__.py'))] 35 36 37 env_marker = ( 38 "sys_platform != 'win32'" 39 " and sys_platform != 'cygwin'" 40 " and platform_python_implementation != 'pypy'" 41 ) 42 43 requirements = [ 44 "click==7.*", 45 "h11==0.8.*", 46 "websockets==7.*", 47 "httptools==0.0.13 ;" + env_marker, 48 "uvloop==0.* ;" + env_marker, 49 ] 50 51 52 setup( 53 name='uvicorn', 54 version=get_version('uvicorn'), 55 url='https://github.com/encode/uvicorn', 56 license='BSD', 57 description='The lightning-fast ASGI server.', 58 long_description=get_long_description(), 59 long_description_content_type='text/markdown', 60 author='Tom Christie', 61 author_email='[email protected]', 62 packages=get_packages('uvicorn'), 63 install_requires=requirements, 64 data_files = [("", ["LICENSE.md"])], 65 classifiers=[ 66 'Development Status :: 3 - Alpha', 67 'Environment :: Web Environment', 68 'Intended Audience :: Developers', 69 'License :: OSI Approved :: BSD License', 70 'Operating System :: OS Independent', 71 'Topic :: Internet :: WWW/HTTP', 72 'Programming Language :: Python :: 3', 73 'Programming Language :: Python :: 3.5', 74 'Programming Language :: Python :: 3.6', 75 'Programming Language :: Python :: 3.7', 76 'Programming Language :: Python :: Implementation :: CPython', 77 'Programming Language :: Python :: Implementation :: PyPy', 78 ], 79 entry_points=""" 80 [console_scripts] 81 uvicorn=uvicorn.main:main 82 """ 83 ) 84 [end of setup.py] [start of uvicorn/protocols/websockets/websockets_impl.py] 1 import asyncio 2 import http 3 from urllib.parse import unquote 4 5 import websockets 6 7 from uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl 8 9 10 class Server: 11 closing = False 12 13 def register(self, ws): 14 pass 15 16 def unregister(self, ws): 17 pass 18 19 20 class WebSocketProtocol(websockets.WebSocketServerProtocol): 21 def __init__(self, config, server_state, _loop=None): 22 if not config.loaded: 23 config.load() 24 25 self.config = config 26 self.app = config.loaded_app 27 self.loop = _loop or asyncio.get_event_loop() 28 self.logger = config.logger_instance 29 self.root_path = config.root_path 30 31 # Shared server state 32 self.connections = server_state.connections 33 self.tasks = server_state.tasks 34 35 # Connection state 36 self.transport = None 37 self.server = None 38 self.client = None 39 self.scheme = None 40 41 # Connection events 42 self.scope = None 43 self.handshake_started_event = asyncio.Event() 44 self.handshake_completed_event = asyncio.Event() 45 self.closed_event = asyncio.Event() 46 self.initial_response = None 47 self.connect_sent = False 48 self.accepted_subprotocol = None 49 50 server = Server() 51 52 super().__init__(ws_handler=self.ws_handler, ws_server=server) 53 54 def connection_made(self, transport): 55 self.connections.add(self) 56 self.transport = transport 57 self.server = get_local_addr(transport) 58 self.client = get_remote_addr(transport) 59 self.scheme = "wss" if is_ssl(transport) else "ws" 60 super().connection_made(transport) 61 62 def connection_lost(self, exc): 63 self.connections.remove(self) 64 self.handshake_completed_event.set() 65 super().connection_lost(exc) 66 67 def shutdown(self): 68 self.transport.close() 69 70 def on_task_complete(self, task): 71 self.tasks.discard(task) 72 73 async def process_request(self, path, headers): 74 """ 75 This hook is called to determine if the websocket should return 76 an HTTP response 
and close. 77 78 Our behavior here is to start the ASGI application, and then wait 79 for either `accept` or `close` in order to determine if we should 80 close the connection. 81 """ 82 path_portion, _, query_string = path.partition("?") 83 84 websockets.handshake.check_request(headers) 85 86 subprotocols = [] 87 for header in headers.get_all("Sec-WebSocket-Protocol"): 88 subprotocols.extend([token.strip() for token in header.split(",")]) 89 90 asgi_headers = [ 91 (name.encode("ascii"), value.encode("ascii")) 92 for name, value in headers.raw_items() 93 ] 94 95 self.scope = { 96 "type": "websocket", 97 "scheme": self.scheme, 98 "server": self.server, 99 "client": self.client, 100 "root_path": self.root_path, 101 "path": unquote(path_portion), 102 "raw_path": path_portion, 103 "query_string": query_string.encode("ascii"), 104 "headers": asgi_headers, 105 "subprotocols": subprotocols, 106 } 107 task = self.loop.create_task(self.run_asgi()) 108 task.add_done_callback(self.on_task_complete) 109 self.tasks.add(task) 110 await self.handshake_started_event.wait() 111 return self.initial_response 112 113 def process_subprotocol(self, headers, available_subprotocols): 114 """ 115 We override the standard 'process_subprotocol' behavior here so that 116 we return whatever subprotocol is sent in the 'accept' message. 117 """ 118 return self.accepted_subprotocol 119 120 def send_500_response(self): 121 msg = b"Internal Server Error" 122 content = [ 123 b"HTTP/1.1 500 Internal Server Error\r\n" 124 b"content-type: text/plain; charset=utf-8\r\n", 125 b"content-length: " + str(len(msg)).encode("ascii") + b"\r\n", 126 b"connection: close\r\n", 127 b"\r\n", 128 msg, 129 ] 130 self.transport.write(b"".join(content)) 131 132 async def ws_handler(self, protocol, path): 133 """ 134 This is the main handler function for the 'websockets' implementation 135 to call into. We just wait for close then return, and instead allow 136 'send' and 'receive' events to drive the flow. 137 """ 138 self.handshake_completed_event.set() 139 await self.closed_event.wait() 140 141 async def run_asgi(self): 142 """ 143 Wrapper around the ASGI callable, handling exceptions and unexpected 144 termination states. 145 """ 146 try: 147 result = await self.app(self.scope, self.asgi_receive, self.asgi_send) 148 except BaseException as exc: 149 self.closed_event.set() 150 msg = "Exception in ASGI application\n" 151 self.logger.error(msg, exc_info=exc) 152 if not self.handshake_started_event.is_set(): 153 self.send_500_response() 154 else: 155 await self.handshake_completed_event.wait() 156 self.transport.close() 157 else: 158 self.closed_event.set() 159 if not self.handshake_started_event.is_set(): 160 msg = "ASGI callable returned without sending handshake." 161 self.logger.error(msg) 162 self.send_500_response() 163 self.transport.close() 164 elif result is not None: 165 msg = "ASGI callable should return None, but returned '%s'." 
166 self.logger.error(msg, result) 167 await self.handshake_completed_event.wait() 168 self.transport.close() 169 170 async def asgi_send(self, message): 171 message_type = message["type"] 172 173 if not self.handshake_started_event.is_set(): 174 if message_type == "websocket.accept": 175 self.logger.info( 176 '%s - "WebSocket %s" [accepted]', 177 self.scope["client"], 178 self.scope["root_path"] + self.scope["path"], 179 ) 180 self.initial_response = None 181 self.accepted_subprotocol = message.get("subprotocol") 182 self.handshake_started_event.set() 183 184 elif message_type == "websocket.close": 185 self.logger.info( 186 '%s - "WebSocket %s" 403', 187 self.scope["client"], 188 self.scope["root_path"] + self.scope["path"], 189 ) 190 self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b"") 191 self.handshake_started_event.set() 192 self.closed_event.set() 193 194 else: 195 msg = "Expected ASGI message 'websocket.accept' or 'websocket.close', but got '%s'." 196 raise RuntimeError(msg % message_type) 197 198 elif not self.closed_event.is_set(): 199 await self.handshake_completed_event.wait() 200 201 if message_type == "websocket.send": 202 bytes_data = message.get("bytes") 203 text_data = message.get("text") 204 data = text_data if bytes_data is None else bytes_data 205 await self.send(data) 206 207 elif message_type == "websocket.close": 208 code = message.get("code", 1000) 209 await self.close(code) 210 self.closed_event.set() 211 212 else: 213 msg = "Expected ASGI message 'websocket.send' or 'websocket.close', but got '%s'." 214 raise RuntimeError(msg % message_type) 215 216 else: 217 msg = "Unexpected ASGI message '%s', after sending 'websocket.close'." 218 raise RuntimeError(msg % message_type) 219 220 async def asgi_receive(self): 221 if not self.connect_sent: 222 self.connect_sent = True 223 return {"type": "websocket.connect"} 224 225 await self.handshake_completed_event.wait() 226 try: 227 data = await self.recv() 228 except websockets.ConnectionClosed as exc: 229 return {"type": "websocket.disconnect", "code": exc.code} 230 231 msg = {"type": "websocket.receive"} 232 233 if isinstance(data, str): 234 msg["text"] = data 235 else: 236 msg["bytes"] = data 237 238 return msg 239 [end of uvicorn/protocols/websockets/websockets_impl.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -43,7 +43,7 @@ requirements = [ "click==7.*", "h11==0.8.*", - "websockets==7.*", + "websockets==8.*", "httptools==0.0.13 ;" + env_marker, "uvloop==0.* ;" + env_marker, ] diff --git a/uvicorn/protocols/websockets/websockets_impl.py b/uvicorn/protocols/websockets/websockets_impl.py --- a/uvicorn/protocols/websockets/websockets_impl.py +++ b/uvicorn/protocols/websockets/websockets_impl.py @@ -16,6 +16,9 @@ def unregister(self, ws): pass + def is_serving(self): + return not self.closing + class WebSocketProtocol(websockets.WebSocketServerProtocol): def __init__(self, config, server_state, _loop=None): @@ -46,10 +49,11 @@ self.initial_response = None self.connect_sent = False self.accepted_subprotocol = None + self.transfer_data_task = None - server = Server() + self.ws_server = Server() - super().__init__(ws_handler=self.ws_handler, ws_server=server) + super().__init__(ws_handler=self.ws_handler, ws_server=self.ws_server) def connection_made(self, transport): self.connections.add(self) @@ -65,6 +69,7 @@ super().connection_lost(exc) def shutdown(self): + self.ws_server.closing = True self.transport.close() def on_task_complete(self, task):
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -43,7 +43,7 @@\n requirements = [\n \"click==7.*\",\n \"h11==0.8.*\",\n- \"websockets==7.*\",\n+ \"websockets==8.*\",\n \"httptools==0.0.13 ;\" + env_marker,\n \"uvloop==0.* ;\" + env_marker,\n ]\ndiff --git a/uvicorn/protocols/websockets/websockets_impl.py b/uvicorn/protocols/websockets/websockets_impl.py\n--- a/uvicorn/protocols/websockets/websockets_impl.py\n+++ b/uvicorn/protocols/websockets/websockets_impl.py\n@@ -16,6 +16,9 @@\n def unregister(self, ws):\n pass\n \n+ def is_serving(self):\n+ return not self.closing\n+\n \n class WebSocketProtocol(websockets.WebSocketServerProtocol):\n def __init__(self, config, server_state, _loop=None):\n@@ -46,10 +49,11 @@\n self.initial_response = None\n self.connect_sent = False\n self.accepted_subprotocol = None\n+ self.transfer_data_task = None\n \n- server = Server()\n+ self.ws_server = Server()\n \n- super().__init__(ws_handler=self.ws_handler, ws_server=server)\n+ super().__init__(ws_handler=self.ws_handler, ws_server=self.ws_server)\n \n def connection_made(self, transport):\n self.connections.add(self)\n@@ -65,6 +69,7 @@\n super().connection_lost(exc)\n \n def shutdown(self):\n+ self.ws_server.closing = True\n self.transport.close()\n \n def on_task_complete(self, task):\n", "issue": "Update 'websockets' to 8.x\nI realize that https://github.com/encode/uvicorn/pull/396 was done because the tests appear to be failing with `websockets>=8.0`, but that release also fixed a bunch of DeprecationWarnings that we've been seeing in [httpx builds on 3.8](https://travis-ci.org/encode/httpx/jobs/574949180).\r\n\r\nSince it's only a `DeprecationWarning`, and only on 3.8 ([3.7 and below is fine](https://travis-ci.org/encode/httpx/jobs/574949179)), I definitely would agree that this is a low priority issue.\r\n\r\nI dug into the tests to try and figure out what exactly was going wrong, but the `AttributeError`s I looked at looks like the main issue is two missing functions in [websockets_impl.py](https://github.com/encode/uvicorn/blob/master/uvicorn/protocols/websockets/websockets_impl.py)\r\n\r\n`15 AttributeError: 'Server' object has no attribute 'is_serving'` --> https://github.com/aaugustin/websockets/blob/8.0.2/src/websockets/server.py#L642-L668\r\n` 9 AttributeError: 'WebSocketProtocol' object has no attribute 'transfer_data_task'` --> https://github.com/aaugustin/websockets/blob/8.0.2/src/websockets/protocol.py#L276-L313\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport platform\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, '__init__.py')\n init_py = open(path, 'r', encoding='utf8').read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open('README.md', 'r', encoding='utf8').read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nenv_marker = (\n \"sys_platform != 'win32'\"\n \" and sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'pypy'\"\n)\n\nrequirements = [\n \"click==7.*\",\n \"h11==0.8.*\",\n \"websockets==7.*\",\n \"httptools==0.0.13 ;\" + 
env_marker,\n \"uvloop==0.* ;\" + env_marker,\n]\n\n\nsetup(\n name='uvicorn',\n version=get_version('uvicorn'),\n url='https://github.com/encode/uvicorn',\n license='BSD',\n description='The lightning-fast ASGI server.',\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n author='Tom Christie',\n author_email='[email protected]',\n packages=get_packages('uvicorn'),\n install_requires=requirements,\n data_files = [(\"\", [\"LICENSE.md\"])],\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: Internet :: WWW/HTTP',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\"\n)\n", "path": "setup.py"}, {"content": "import asyncio\nimport http\nfrom urllib.parse import unquote\n\nimport websockets\n\nfrom uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl\n\n\nclass Server:\n closing = False\n\n def register(self, ws):\n pass\n\n def unregister(self, ws):\n pass\n\n\nclass WebSocketProtocol(websockets.WebSocketServerProtocol):\n def __init__(self, config, server_state, _loop=None):\n if not config.loaded:\n config.load()\n\n self.config = config\n self.app = config.loaded_app\n self.loop = _loop or asyncio.get_event_loop()\n self.logger = config.logger_instance\n self.root_path = config.root_path\n\n # Shared server state\n self.connections = server_state.connections\n self.tasks = server_state.tasks\n\n # Connection state\n self.transport = None\n self.server = None\n self.client = None\n self.scheme = None\n\n # Connection events\n self.scope = None\n self.handshake_started_event = asyncio.Event()\n self.handshake_completed_event = asyncio.Event()\n self.closed_event = asyncio.Event()\n self.initial_response = None\n self.connect_sent = False\n self.accepted_subprotocol = None\n\n server = Server()\n\n super().__init__(ws_handler=self.ws_handler, ws_server=server)\n\n def connection_made(self, transport):\n self.connections.add(self)\n self.transport = transport\n self.server = get_local_addr(transport)\n self.client = get_remote_addr(transport)\n self.scheme = \"wss\" if is_ssl(transport) else \"ws\"\n super().connection_made(transport)\n\n def connection_lost(self, exc):\n self.connections.remove(self)\n self.handshake_completed_event.set()\n super().connection_lost(exc)\n\n def shutdown(self):\n self.transport.close()\n\n def on_task_complete(self, task):\n self.tasks.discard(task)\n\n async def process_request(self, path, headers):\n \"\"\"\n This hook is called to determine if the websocket should return\n an HTTP response and close.\n\n Our behavior here is to start the ASGI application, and then wait\n for either `accept` or `close` in order to determine if we should\n close the connection.\n \"\"\"\n path_portion, _, query_string = path.partition(\"?\")\n\n websockets.handshake.check_request(headers)\n\n subprotocols = []\n for header in headers.get_all(\"Sec-WebSocket-Protocol\"):\n subprotocols.extend([token.strip() for token in header.split(\",\")])\n\n asgi_headers = [\n (name.encode(\"ascii\"), value.encode(\"ascii\"))\n for name, 
value in headers.raw_items()\n ]\n\n self.scope = {\n \"type\": \"websocket\",\n \"scheme\": self.scheme,\n \"server\": self.server,\n \"client\": self.client,\n \"root_path\": self.root_path,\n \"path\": unquote(path_portion),\n \"raw_path\": path_portion,\n \"query_string\": query_string.encode(\"ascii\"),\n \"headers\": asgi_headers,\n \"subprotocols\": subprotocols,\n }\n task = self.loop.create_task(self.run_asgi())\n task.add_done_callback(self.on_task_complete)\n self.tasks.add(task)\n await self.handshake_started_event.wait()\n return self.initial_response\n\n def process_subprotocol(self, headers, available_subprotocols):\n \"\"\"\n We override the standard 'process_subprotocol' behavior here so that\n we return whatever subprotocol is sent in the 'accept' message.\n \"\"\"\n return self.accepted_subprotocol\n\n def send_500_response(self):\n msg = b\"Internal Server Error\"\n content = [\n b\"HTTP/1.1 500 Internal Server Error\\r\\n\"\n b\"content-type: text/plain; charset=utf-8\\r\\n\",\n b\"content-length: \" + str(len(msg)).encode(\"ascii\") + b\"\\r\\n\",\n b\"connection: close\\r\\n\",\n b\"\\r\\n\",\n msg,\n ]\n self.transport.write(b\"\".join(content))\n\n async def ws_handler(self, protocol, path):\n \"\"\"\n This is the main handler function for the 'websockets' implementation\n to call into. We just wait for close then return, and instead allow\n 'send' and 'receive' events to drive the flow.\n \"\"\"\n self.handshake_completed_event.set()\n await self.closed_event.wait()\n\n async def run_asgi(self):\n \"\"\"\n Wrapper around the ASGI callable, handling exceptions and unexpected\n termination states.\n \"\"\"\n try:\n result = await self.app(self.scope, self.asgi_receive, self.asgi_send)\n except BaseException as exc:\n self.closed_event.set()\n msg = \"Exception in ASGI application\\n\"\n self.logger.error(msg, exc_info=exc)\n if not self.handshake_started_event.is_set():\n self.send_500_response()\n else:\n await self.handshake_completed_event.wait()\n self.transport.close()\n else:\n self.closed_event.set()\n if not self.handshake_started_event.is_set():\n msg = \"ASGI callable returned without sending handshake.\"\n self.logger.error(msg)\n self.send_500_response()\n self.transport.close()\n elif result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.logger.error(msg, result)\n await self.handshake_completed_event.wait()\n self.transport.close()\n\n async def asgi_send(self, message):\n message_type = message[\"type\"]\n\n if not self.handshake_started_event.is_set():\n if message_type == \"websocket.accept\":\n self.logger.info(\n '%s - \"WebSocket %s\" [accepted]',\n self.scope[\"client\"],\n self.scope[\"root_path\"] + self.scope[\"path\"],\n )\n self.initial_response = None\n self.accepted_subprotocol = message.get(\"subprotocol\")\n self.handshake_started_event.set()\n\n elif message_type == \"websocket.close\":\n self.logger.info(\n '%s - \"WebSocket %s\" 403',\n self.scope[\"client\"],\n self.scope[\"root_path\"] + self.scope[\"path\"],\n )\n self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b\"\")\n self.handshake_started_event.set()\n self.closed_event.set()\n\n else:\n msg = \"Expected ASGI message 'websocket.accept' or 'websocket.close', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n elif not self.closed_event.is_set():\n await self.handshake_completed_event.wait()\n\n if message_type == \"websocket.send\":\n bytes_data = message.get(\"bytes\")\n text_data = message.get(\"text\")\n data = text_data 
if bytes_data is None else bytes_data\n await self.send(data)\n\n elif message_type == \"websocket.close\":\n code = message.get(\"code\", 1000)\n await self.close(code)\n self.closed_event.set()\n\n else:\n msg = \"Expected ASGI message 'websocket.send' or 'websocket.close', but got '%s'.\"\n raise RuntimeError(msg % message_type)\n\n else:\n msg = \"Unexpected ASGI message '%s', after sending 'websocket.close'.\"\n raise RuntimeError(msg % message_type)\n\n async def asgi_receive(self):\n if not self.connect_sent:\n self.connect_sent = True\n return {\"type\": \"websocket.connect\"}\n\n await self.handshake_completed_event.wait()\n try:\n data = await self.recv()\n except websockets.ConnectionClosed as exc:\n return {\"type\": \"websocket.disconnect\", \"code\": exc.code}\n\n msg = {\"type\": \"websocket.receive\"}\n\n if isinstance(data, str):\n msg[\"text\"] = data\n else:\n msg[\"bytes\"] = data\n\n return msg\n", "path": "uvicorn/protocols/websockets/websockets_impl.py"}]}
3,906
387
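The patch in the record above works because websockets 8.x probes `ws_server.is_serving()` during the opening handshake and expects a `transfer_data_task` attribute when tearing a connection down. A minimal ASGI websocket app of the kind `WebSocketProtocol` drives can serve as a smoke test once the pin moves to `websockets==8.*`; the message types are taken from the quoted `websockets_impl.py`, and the module name used when serving it is illustrative.

```python
async def app(scope, receive, send):
    # Mirrors the flow handled by WebSocketProtocol above:
    # websocket.connect -> websocket.accept -> websocket.send -> websocket.close
    assert scope["type"] == "websocket"
    message = await receive()
    assert message["type"] == "websocket.connect"
    await send({"type": "websocket.accept"})
    await send({"type": "websocket.send", "text": "hello"})
    await send({"type": "websocket.close", "code": 1000})
```

Serving this module with uvicorn and connecting with any websocket client should then complete without the `AttributeError`s quoted in the issue.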
gh_patches_debug_16631
rasdani/github-patches
git_diff
google__osv.dev-986
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The purl of Alpine ecosystem is inconsistent with purl-spec According to [**purl-type `apk`**](https://github.com/package-url/purl-spec/blob/master/PURL-TYPES.rst#apk) ([PR](https://github.com/package-url/purl-spec/pull/171)), it seems that the purl type of `Alpine` should be `apk` and the purl namespace can be `alpine`. In this project, however, the purl type of `Alpine` is `alpine`. As a result, the purl of a package is **different** when apply different standards. For example, the purl of the `curl` package is `pkg:apk/alpine/curl` according to purl-spec while it is `pkg:alpine/curl` in this project. </issue> <code> [start of osv/purl_helpers.py] 1 # Copyright 2022 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """PURL conversion utilities.""" 15 16 from urllib.parse import quote 17 18 PURL_ECOSYSTEMS = { 19 'crates.io': 'cargo', 20 'Debian': 'deb', 21 'Hex': 'hex', 22 'Go': 'golang', 23 'Maven': 'maven', 24 'NuGet': 'nuget', 25 'npm': 'npm', 26 'Packagist': 'composer', 27 'OSS-Fuzz': 'generic', 28 'PyPI': 'pypi', 29 'RubyGems': 'gem', 30 } 31 32 33 def _url_encode(package_name): 34 """URL encode a PURL `namespace/name` or `name`.""" 35 parts = package_name.split('/') 36 return '/'.join(quote(p) for p in parts) 37 38 39 def package_to_purl(ecosystem, package_name): 40 """Convert a ecosystem and package name to PURL.""" 41 purl_type = PURL_ECOSYSTEMS.get(ecosystem) 42 if not purl_type: 43 return None 44 45 suffix = '' 46 47 if purl_type == 'maven': 48 # PURLs use / to separate the group ID and the artifact ID. 49 package_name = package_name.replace(':', '/', 1) 50 51 if purl_type == 'deb': 52 package_name = 'debian/' + package_name 53 suffix = '?arch=source' 54 55 return f'pkg:{purl_type}/{_url_encode(package_name)}{suffix}' 56 [end of osv/purl_helpers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/osv/purl_helpers.py b/osv/purl_helpers.py --- a/osv/purl_helpers.py +++ b/osv/purl_helpers.py @@ -16,6 +16,7 @@ from urllib.parse import quote PURL_ECOSYSTEMS = { + 'Alpine': 'apk', 'crates.io': 'cargo', 'Debian': 'deb', 'Hex': 'hex', @@ -48,8 +49,12 @@ # PURLs use / to separate the group ID and the artifact ID. package_name = package_name.replace(':', '/', 1) - if purl_type == 'deb': + if purl_type == 'deb' and ecosystem == 'Debian': package_name = 'debian/' + package_name suffix = '?arch=source' + if purl_type == 'apk' and ecosystem == 'Alpine': + package_name = 'alpine/' + package_name + suffix = '?arch=source' + return f'pkg:{purl_type}/{_url_encode(package_name)}{suffix}'
{"golden_diff": "diff --git a/osv/purl_helpers.py b/osv/purl_helpers.py\n--- a/osv/purl_helpers.py\n+++ b/osv/purl_helpers.py\n@@ -16,6 +16,7 @@\n from urllib.parse import quote\n \n PURL_ECOSYSTEMS = {\n+ 'Alpine': 'apk',\n 'crates.io': 'cargo',\n 'Debian': 'deb',\n 'Hex': 'hex',\n@@ -48,8 +49,12 @@\n # PURLs use / to separate the group ID and the artifact ID.\n package_name = package_name.replace(':', '/', 1)\n \n- if purl_type == 'deb':\n+ if purl_type == 'deb' and ecosystem == 'Debian':\n package_name = 'debian/' + package_name\n suffix = '?arch=source'\n \n+ if purl_type == 'apk' and ecosystem == 'Alpine':\n+ package_name = 'alpine/' + package_name\n+ suffix = '?arch=source'\n+\n return f'pkg:{purl_type}/{_url_encode(package_name)}{suffix}'\n", "issue": "The purl of Alpine ecosystem is inconsistent with purl-spec\nAccording to [**purl-type `apk`**](https://github.com/package-url/purl-spec/blob/master/PURL-TYPES.rst#apk) ([PR](https://github.com/package-url/purl-spec/pull/171)), it seems that the purl type of `Alpine` should be `apk` and the purl namespace can be `alpine`. In this project, however, the purl type of `Alpine` is `alpine`.\r\n\r\nAs a result, the purl of a package is **different** when apply different standards. For example, the purl of the `curl` package is `pkg:apk/alpine/curl` according to purl-spec while it is `pkg:alpine/curl` in this project.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"PURL conversion utilities.\"\"\"\n\nfrom urllib.parse import quote\n\nPURL_ECOSYSTEMS = {\n 'crates.io': 'cargo',\n 'Debian': 'deb',\n 'Hex': 'hex',\n 'Go': 'golang',\n 'Maven': 'maven',\n 'NuGet': 'nuget',\n 'npm': 'npm',\n 'Packagist': 'composer',\n 'OSS-Fuzz': 'generic',\n 'PyPI': 'pypi',\n 'RubyGems': 'gem',\n}\n\n\ndef _url_encode(package_name):\n \"\"\"URL encode a PURL `namespace/name` or `name`.\"\"\"\n parts = package_name.split('/')\n return '/'.join(quote(p) for p in parts)\n\n\ndef package_to_purl(ecosystem, package_name):\n \"\"\"Convert a ecosystem and package name to PURL.\"\"\"\n purl_type = PURL_ECOSYSTEMS.get(ecosystem)\n if not purl_type:\n return None\n\n suffix = ''\n\n if purl_type == 'maven':\n # PURLs use / to separate the group ID and the artifact ID.\n package_name = package_name.replace(':', '/', 1)\n\n if purl_type == 'deb':\n package_name = 'debian/' + package_name\n suffix = '?arch=source'\n\n return f'pkg:{purl_type}/{_url_encode(package_name)}{suffix}'\n", "path": "osv/purl_helpers.py"}]}
1,256
244
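With the mapping added in the diff above, `package_to_purl` should emit `apk`-type purls in the `alpine` namespace, matching purl-spec. A quick check is sketched below; the expected outputs in the comments are derived from the diff rather than from running the repository.

```python
from osv.purl_helpers import package_to_purl

print(package_to_purl("Alpine", "curl"))    # pkg:apk/alpine/curl?arch=source
print(package_to_purl("Debian", "curl"))    # pkg:deb/debian/curl?arch=source
print(package_to_purl("PyPI", "requests"))  # pkg:pypi/requests
print(package_to_purl("Unknown", "foo"))    # None (no purl type registered)
```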
gh_patches_debug_30461
rasdani/github-patches
git_diff
pyodide__pyodide-3811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Allow specifying output directory in pyodide build with -o or --outdir I added the capability to specify output directory in pyodide build with `--output-directory`. But here `pypa/build` accepts either `--outdir` or `-o`. We should accept these as ways to specify the output directory for consistency </issue> <code> [start of pyodide-build/pyodide_build/cli/build.py] 1 import re 2 import shutil 3 import tempfile 4 from pathlib import Path 5 from typing import Optional 6 from urllib.parse import urlparse 7 8 import requests 9 import typer 10 11 from .. import common 12 from ..out_of_tree import build 13 from ..out_of_tree.pypi import ( 14 build_dependencies_for_wheel, 15 build_wheels_from_pypi_requirements, 16 fetch_pypi_package, 17 ) 18 from ..out_of_tree.utils import initialize_pyodide_root 19 20 21 def pypi( 22 package: str, 23 output_directory: Path, 24 exports: str = typer.Option( 25 "requested", 26 help="Which symbols should be exported when linking .so files?", 27 ), 28 ctx: typer.Context = typer.Context, 29 ) -> Path: 30 """Fetch a wheel from pypi, or build from source if none available.""" 31 initialize_pyodide_root() 32 common.check_emscripten_version() 33 backend_flags = ctx.args 34 with tempfile.TemporaryDirectory() as tmpdir: 35 srcdir = Path(tmpdir) 36 37 # get package from pypi 38 package_path = fetch_pypi_package(package, srcdir) 39 if not package_path.is_dir(): 40 # a pure-python wheel has been downloaded - just copy to dist folder 41 dest_file = output_directory / package_path.name 42 shutil.copyfile(str(package_path), output_directory / package_path.name) 43 print(f"Successfully fetched: {package_path.name}") 44 return dest_file 45 46 built_wheel = build.run(srcdir, output_directory, exports, backend_flags) 47 return built_wheel 48 49 50 def download_url(url: str, output_directory: Path) -> str: 51 with requests.get(url, stream=True) as response: 52 urlpath = Path(urlparse(response.url).path) 53 if urlpath.suffix == ".gz": 54 urlpath = urlpath.with_suffix("") 55 file_name = urlpath.name 56 with open(output_directory / file_name, "wb") as f: 57 for chunk in response.iter_content(chunk_size=1 << 20): 58 f.write(chunk) 59 return file_name 60 61 62 def url( 63 package_url: str, 64 output_directory: Path, 65 exports: str = typer.Option( 66 "requested", 67 help="Which symbols should be exported when linking .so files?", 68 ), 69 ctx: typer.Context = typer.Context, 70 ) -> Path: 71 """Fetch a wheel or build sdist from url.""" 72 initialize_pyodide_root() 73 common.check_emscripten_version() 74 backend_flags = ctx.args 75 with tempfile.TemporaryDirectory() as tmpdir: 76 tmppath = Path(tmpdir) 77 filename = download_url(package_url, tmppath) 78 if Path(filename).suffix == ".whl": 79 shutil.move(tmppath / filename, output_directory / filename) 80 return output_directory / filename 81 82 builddir = tmppath / "build" 83 shutil.unpack_archive(tmppath / filename, builddir) 84 files = list(builddir.iterdir()) 85 if len(files) == 1 and files[0].is_dir(): 86 # unzipped into subfolder 87 builddir = files[0] 88 wheel_path = build.run(builddir, output_directory, exports, backend_flags) 89 return wheel_path 90 91 92 def source( 93 source_location: Path, 94 output_directory: Path, 95 exports: str = typer.Option( 96 "requested", 97 help="Which symbols should be exported when linking .so files?", 98 ), 99 ctx: typer.Context = typer.Context, 100 ) -> Path: 101 """Use pypa/build to build a 
Python package from source""" 102 initialize_pyodide_root() 103 common.check_emscripten_version() 104 backend_flags = ctx.args 105 built_wheel = build.run(source_location, output_directory, exports, backend_flags) 106 return built_wheel 107 108 109 # simple 'pyodide build' command 110 def main( 111 source_location: "Optional[str]" = typer.Argument( 112 "", 113 help="Build source, can be source folder, pypi version specification, or url to a source dist archive or wheel file. If this is blank, it will build the current directory.", 114 ), 115 output_directory: str = typer.Option( 116 "./dist", 117 help="which directory should the output be placed into?", 118 ), 119 requirements_txt: str = typer.Option( 120 "", 121 "--requirements", 122 "-r", 123 help="Build a list of package requirements from a requirements.txt file", 124 ), 125 exports: str = typer.Option( 126 "requested", 127 help="Which symbols should be exported when linking .so files?", 128 ), 129 build_dependencies: bool = typer.Option( 130 False, help="Fetch non-pyodide dependencies from pypi and build them too." 131 ), 132 output_lockfile: str = typer.Option( 133 "", 134 help="Output list of resolved dependencies to a file in requirements.txt format", 135 ), 136 skip_dependency: list[str] = typer.Option( 137 [], 138 help="Skip building or resolving a single dependency. Use multiple times or provide a comma separated list to skip multiple dependencies.", 139 ), 140 compression_level: int = typer.Option( 141 6, help="Compression level to use for the created zip file" 142 ), 143 ctx: typer.Context = typer.Context, 144 ) -> None: 145 """Use pypa/build to build a Python package from source, pypi or url.""" 146 outpath = Path(output_directory).resolve() 147 outpath.mkdir(exist_ok=True) 148 extras: list[str] = [] 149 150 if len(requirements_txt) > 0: 151 # a requirements.txt - build it (and optionally deps) 152 if not Path(requirements_txt).exists(): 153 raise RuntimeError( 154 f"Couldn't find requirements text file {requirements_txt}" 155 ) 156 reqs = [] 157 with open(requirements_txt) as f: 158 raw_reqs = [x.strip() for x in f.readlines()] 159 for x in raw_reqs: 160 # remove comments 161 comment_pos = x.find("#") 162 if comment_pos != -1: 163 x = x[:comment_pos].strip() 164 if len(x) > 0: 165 if x[0] == "-": 166 raise RuntimeError( 167 f"pyodide build only supports name-based PEP508 requirements. [{x}] will not work." 168 ) 169 if x.find("@") != -1: 170 raise RuntimeError( 171 f"pyodide build does not support URL based requirements. 
[{x}] will not work" 172 ) 173 reqs.append(x) 174 try: 175 build_wheels_from_pypi_requirements( 176 reqs, 177 outpath, 178 build_dependencies, 179 skip_dependency, 180 exports, 181 ctx.args, 182 output_lockfile=output_lockfile, 183 ) 184 except BaseException as e: 185 import traceback 186 187 print("Failed building multiple wheels:", traceback.format_exc()) 188 raise e 189 return 190 191 if source_location is not None: 192 extras = re.findall(r"\[(\w+)\]", source_location) 193 if len(extras) != 0: 194 source_location = source_location[0 : source_location.find("[")] 195 if not source_location: 196 # build the current folder 197 wheel = source(Path.cwd(), outpath, exports, ctx) 198 elif source_location.find("://") != -1: 199 wheel = url(source_location, outpath, exports, ctx) 200 elif Path(source_location).is_dir(): 201 # a folder, build it 202 wheel = source(Path(source_location).resolve(), outpath, exports, ctx) 203 elif source_location.find("/") == -1: 204 # try fetch or build from pypi 205 wheel = pypi(source_location, outpath, exports, ctx) 206 else: 207 raise RuntimeError(f"Couldn't determine source type for {source_location}") 208 # now build deps for wheel 209 if build_dependencies: 210 try: 211 build_dependencies_for_wheel( 212 wheel, 213 extras, 214 skip_dependency, 215 exports, 216 ctx.args, 217 output_lockfile=output_lockfile, 218 compression_level=compression_level, 219 ) 220 except BaseException as e: 221 import traceback 222 223 print("Failed building dependencies for wheel:", traceback.format_exc()) 224 wheel.unlink() 225 raise e 226 227 228 main.typer_kwargs = { # type: ignore[attr-defined] 229 "context_settings": { 230 "ignore_unknown_options": True, 231 "allow_extra_args": True, 232 }, 233 } 234 [end of pyodide-build/pyodide_build/cli/build.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyodide-build/pyodide_build/cli/build.py b/pyodide-build/pyodide_build/cli/build.py --- a/pyodide-build/pyodide_build/cli/build.py +++ b/pyodide-build/pyodide_build/cli/build.py @@ -1,5 +1,6 @@ import re import shutil +import sys import tempfile from pathlib import Path from typing import Optional @@ -113,9 +114,12 @@ help="Build source, can be source folder, pypi version specification, or url to a source dist archive or wheel file. If this is blank, it will build the current directory.", ), output_directory: str = typer.Option( - "./dist", + "", + "--outdir", + "-o", help="which directory should the output be placed into?", ), + output_directory_compat: str = typer.Option("", "--output-directory", hidden=True), requirements_txt: str = typer.Option( "", "--requirements", @@ -143,6 +147,16 @@ ctx: typer.Context = typer.Context, ) -> None: """Use pypa/build to build a Python package from source, pypi or url.""" + if output_directory_compat: + print( + "--output-directory is deprecated, use --outdir or -o instead", + file=sys.stderr, + ) + if output_directory_compat and output_directory: + print("Cannot provide both --outdir and --output-directory", file=sys.stderr) + sys.exit(1) + output_directory = output_directory_compat or output_directory or "./dist" + outpath = Path(output_directory).resolve() outpath.mkdir(exist_ok=True) extras: list[str] = []
{"golden_diff": "diff --git a/pyodide-build/pyodide_build/cli/build.py b/pyodide-build/pyodide_build/cli/build.py\n--- a/pyodide-build/pyodide_build/cli/build.py\n+++ b/pyodide-build/pyodide_build/cli/build.py\n@@ -1,5 +1,6 @@\n import re\n import shutil\n+import sys\n import tempfile\n from pathlib import Path\n from typing import Optional\n@@ -113,9 +114,12 @@\n help=\"Build source, can be source folder, pypi version specification, or url to a source dist archive or wheel file. If this is blank, it will build the current directory.\",\n ),\n output_directory: str = typer.Option(\n- \"./dist\",\n+ \"\",\n+ \"--outdir\",\n+ \"-o\",\n help=\"which directory should the output be placed into?\",\n ),\n+ output_directory_compat: str = typer.Option(\"\", \"--output-directory\", hidden=True),\n requirements_txt: str = typer.Option(\n \"\",\n \"--requirements\",\n@@ -143,6 +147,16 @@\n ctx: typer.Context = typer.Context,\n ) -> None:\n \"\"\"Use pypa/build to build a Python package from source, pypi or url.\"\"\"\n+ if output_directory_compat:\n+ print(\n+ \"--output-directory is deprecated, use --outdir or -o instead\",\n+ file=sys.stderr,\n+ )\n+ if output_directory_compat and output_directory:\n+ print(\"Cannot provide both --outdir and --output-directory\", file=sys.stderr)\n+ sys.exit(1)\n+ output_directory = output_directory_compat or output_directory or \"./dist\"\n+\n outpath = Path(output_directory).resolve()\n outpath.mkdir(exist_ok=True)\n extras: list[str] = []\n", "issue": "Allow specifying output directory in pyodide build with -o or --outdir\nI added the capability to specify output directory in pyodide build with `--output-directory`. But here `pypa/build` accepts either `--outdir` or `-o`. We should accept these as ways to specify the output directory for consistency\n", "before_files": [{"content": "import re\nimport shutil\nimport tempfile\nfrom pathlib import Path\nfrom typing import Optional\nfrom urllib.parse import urlparse\n\nimport requests\nimport typer\n\nfrom .. 
import common\nfrom ..out_of_tree import build\nfrom ..out_of_tree.pypi import (\n build_dependencies_for_wheel,\n build_wheels_from_pypi_requirements,\n fetch_pypi_package,\n)\nfrom ..out_of_tree.utils import initialize_pyodide_root\n\n\ndef pypi(\n package: str,\n output_directory: Path,\n exports: str = typer.Option(\n \"requested\",\n help=\"Which symbols should be exported when linking .so files?\",\n ),\n ctx: typer.Context = typer.Context,\n) -> Path:\n \"\"\"Fetch a wheel from pypi, or build from source if none available.\"\"\"\n initialize_pyodide_root()\n common.check_emscripten_version()\n backend_flags = ctx.args\n with tempfile.TemporaryDirectory() as tmpdir:\n srcdir = Path(tmpdir)\n\n # get package from pypi\n package_path = fetch_pypi_package(package, srcdir)\n if not package_path.is_dir():\n # a pure-python wheel has been downloaded - just copy to dist folder\n dest_file = output_directory / package_path.name\n shutil.copyfile(str(package_path), output_directory / package_path.name)\n print(f\"Successfully fetched: {package_path.name}\")\n return dest_file\n\n built_wheel = build.run(srcdir, output_directory, exports, backend_flags)\n return built_wheel\n\n\ndef download_url(url: str, output_directory: Path) -> str:\n with requests.get(url, stream=True) as response:\n urlpath = Path(urlparse(response.url).path)\n if urlpath.suffix == \".gz\":\n urlpath = urlpath.with_suffix(\"\")\n file_name = urlpath.name\n with open(output_directory / file_name, \"wb\") as f:\n for chunk in response.iter_content(chunk_size=1 << 20):\n f.write(chunk)\n return file_name\n\n\ndef url(\n package_url: str,\n output_directory: Path,\n exports: str = typer.Option(\n \"requested\",\n help=\"Which symbols should be exported when linking .so files?\",\n ),\n ctx: typer.Context = typer.Context,\n) -> Path:\n \"\"\"Fetch a wheel or build sdist from url.\"\"\"\n initialize_pyodide_root()\n common.check_emscripten_version()\n backend_flags = ctx.args\n with tempfile.TemporaryDirectory() as tmpdir:\n tmppath = Path(tmpdir)\n filename = download_url(package_url, tmppath)\n if Path(filename).suffix == \".whl\":\n shutil.move(tmppath / filename, output_directory / filename)\n return output_directory / filename\n\n builddir = tmppath / \"build\"\n shutil.unpack_archive(tmppath / filename, builddir)\n files = list(builddir.iterdir())\n if len(files) == 1 and files[0].is_dir():\n # unzipped into subfolder\n builddir = files[0]\n wheel_path = build.run(builddir, output_directory, exports, backend_flags)\n return wheel_path\n\n\ndef source(\n source_location: Path,\n output_directory: Path,\n exports: str = typer.Option(\n \"requested\",\n help=\"Which symbols should be exported when linking .so files?\",\n ),\n ctx: typer.Context = typer.Context,\n) -> Path:\n \"\"\"Use pypa/build to build a Python package from source\"\"\"\n initialize_pyodide_root()\n common.check_emscripten_version()\n backend_flags = ctx.args\n built_wheel = build.run(source_location, output_directory, exports, backend_flags)\n return built_wheel\n\n\n# simple 'pyodide build' command\ndef main(\n source_location: \"Optional[str]\" = typer.Argument(\n \"\",\n help=\"Build source, can be source folder, pypi version specification, or url to a source dist archive or wheel file. 
If this is blank, it will build the current directory.\",\n ),\n output_directory: str = typer.Option(\n \"./dist\",\n help=\"which directory should the output be placed into?\",\n ),\n requirements_txt: str = typer.Option(\n \"\",\n \"--requirements\",\n \"-r\",\n help=\"Build a list of package requirements from a requirements.txt file\",\n ),\n exports: str = typer.Option(\n \"requested\",\n help=\"Which symbols should be exported when linking .so files?\",\n ),\n build_dependencies: bool = typer.Option(\n False, help=\"Fetch non-pyodide dependencies from pypi and build them too.\"\n ),\n output_lockfile: str = typer.Option(\n \"\",\n help=\"Output list of resolved dependencies to a file in requirements.txt format\",\n ),\n skip_dependency: list[str] = typer.Option(\n [],\n help=\"Skip building or resolving a single dependency. Use multiple times or provide a comma separated list to skip multiple dependencies.\",\n ),\n compression_level: int = typer.Option(\n 6, help=\"Compression level to use for the created zip file\"\n ),\n ctx: typer.Context = typer.Context,\n) -> None:\n \"\"\"Use pypa/build to build a Python package from source, pypi or url.\"\"\"\n outpath = Path(output_directory).resolve()\n outpath.mkdir(exist_ok=True)\n extras: list[str] = []\n\n if len(requirements_txt) > 0:\n # a requirements.txt - build it (and optionally deps)\n if not Path(requirements_txt).exists():\n raise RuntimeError(\n f\"Couldn't find requirements text file {requirements_txt}\"\n )\n reqs = []\n with open(requirements_txt) as f:\n raw_reqs = [x.strip() for x in f.readlines()]\n for x in raw_reqs:\n # remove comments\n comment_pos = x.find(\"#\")\n if comment_pos != -1:\n x = x[:comment_pos].strip()\n if len(x) > 0:\n if x[0] == \"-\":\n raise RuntimeError(\n f\"pyodide build only supports name-based PEP508 requirements. [{x}] will not work.\"\n )\n if x.find(\"@\") != -1:\n raise RuntimeError(\n f\"pyodide build does not support URL based requirements. 
[{x}] will not work\"\n )\n reqs.append(x)\n try:\n build_wheels_from_pypi_requirements(\n reqs,\n outpath,\n build_dependencies,\n skip_dependency,\n exports,\n ctx.args,\n output_lockfile=output_lockfile,\n )\n except BaseException as e:\n import traceback\n\n print(\"Failed building multiple wheels:\", traceback.format_exc())\n raise e\n return\n\n if source_location is not None:\n extras = re.findall(r\"\\[(\\w+)\\]\", source_location)\n if len(extras) != 0:\n source_location = source_location[0 : source_location.find(\"[\")]\n if not source_location:\n # build the current folder\n wheel = source(Path.cwd(), outpath, exports, ctx)\n elif source_location.find(\"://\") != -1:\n wheel = url(source_location, outpath, exports, ctx)\n elif Path(source_location).is_dir():\n # a folder, build it\n wheel = source(Path(source_location).resolve(), outpath, exports, ctx)\n elif source_location.find(\"/\") == -1:\n # try fetch or build from pypi\n wheel = pypi(source_location, outpath, exports, ctx)\n else:\n raise RuntimeError(f\"Couldn't determine source type for {source_location}\")\n # now build deps for wheel\n if build_dependencies:\n try:\n build_dependencies_for_wheel(\n wheel,\n extras,\n skip_dependency,\n exports,\n ctx.args,\n output_lockfile=output_lockfile,\n compression_level=compression_level,\n )\n except BaseException as e:\n import traceback\n\n print(\"Failed building dependencies for wheel:\", traceback.format_exc())\n wheel.unlink()\n raise e\n\n\nmain.typer_kwargs = { # type: ignore[attr-defined]\n \"context_settings\": {\n \"ignore_unknown_options\": True,\n \"allow_extra_args\": True,\n },\n}\n", "path": "pyodide-build/pyodide_build/cli/build.py"}]}
2,985
386
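After the change in the record above, the three spellings of the output option should be interchangeable, with `--output-directory` kept only as a deprecated alias. A rough equivalence check driven from Python is sketched below; the package directory and output directory are illustrative, the `pyodide build` entry point is taken from the quoted source comment, and a working Emscripten toolchain is still required for the build itself.

```python
import subprocess

# All three invocations should place the built wheel in ./dist; the last one
# should additionally print the deprecation notice added in the patch.
for flags in (["--outdir", "dist"], ["-o", "dist"], ["--output-directory", "dist"]):
    subprocess.run(["pyodide", "build", ".", *flags], check=False)
```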
gh_patches_debug_15071
rasdani/github-patches
git_diff
PlasmaPy__PlasmaPy-688
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update acknowledgements to include NSF CSSI grant Good news, everyone! The NSF CSSI proposal that we submitted has officially been awarded! At the request of NSF program manager Slava Lukin, we should update our acknowledgements to include this grant and reflect the support from NSF. I'll assign myself to do this when the grant officially starts (which appears to have been moved up to October 1, 2019). The things that we need to do include: - [x] Revise the sentence that we ask people to include in the acknowledgements section of their papers to reflect funding from NSF - [x] Add an acknowledgements section at the end of `README.md` - [x] Update `CITATION.md` - [x] Update `docs/about/credits.rst` - [x] Update `plasmapy.__citation__` Another thing peripherally related to this issue is to: - [x] Update the [proposal](https://doi.org/10.5281/zenodo.2633286) that we archived on Zenodo to include the answers to questions we had to send in and the panel's comments I'll assign this to myself. </issue> <code> [start of plasmapy/__init__.py] 1 """ 2 PlasmaPy: A plasma physics Python package 3 ================================================ 4 5 Documentation is available in the docstrings, 6 online at https://docs.plasmapy.org (accessible also using 7 the ``plasmapy.online_help`` function). 8 9 Contents 10 -------- 11 PlasmaPy provides the following functionality: 12 13 Subpackages 14 ----------- 15 Each of these subpackages requires an explicit import, for example, 16 via ``import plasmapy.physics``. 17 18 :: 19 20 atomic --- Database for atoms, isotopes, ions... 21 classes --- (WIP) classes used in multiple places 22 data --- Data used for testing and examples 23 diagnostics --- Experimental research data analysis 24 mathematics --- General formulae used elsewhere 25 physics --- Plasma theory functionality 26 transport --- Transport theory functionality 27 utils --- Various utilities 28 29 Utility tools 30 ------------- 31 :: 32 33 test --- Run PlasmaPy unit tests 34 online_help --- Search the online documentation 35 __version__ --- PlasmaPy version string 36 __citation__ --- PlasmaPy citation template 37 38 """ 39 # Licensed under a 3-clause BSD style license - see LICENSE.rst 40 41 # Packages may add whatever they like to this file, but 42 # should keep this content at the top. 43 # ---------------------------------------------------------------------------- 44 from ._base_init import * 45 # ---------------------------------------------------------------------------- 46 47 # Enforce Python version check during package import. 48 # This is the same check as the one at the top of setup.py 49 import sys 50 51 __name__ = "plasmapy" 52 53 if sys.version_info < tuple((int(val) for val in "3.6".split('.'))): 54 raise Exception("plasmapy does not support Python < {}".format(3.6)) 55 56 57 def online_help(query): 58 """ 59 Search the online PlasmaPy documentation for the given query from plasmapy.org 60 Opens the results in the default web browser. 61 Requires an active Internet connection. 62 Redirects to Astropy.units in case of query 'unit' or 'units' 63 64 Parameters 65 ---------- 66 query : str 67 The search query. 68 """ 69 from urllib.parse import urlencode 70 import webbrowser 71 72 url = ('http://docs.plasmapy.org/en/stable/search.html?' 
73 '{0}&check_keywords=yes&area=default').format(urlencode({'q': query})) 74 75 if(query.lower() in ('unit', 'units')): 76 url = 'http://docs.astropy.org/en/stable/units/' 77 78 webbrowser.open(url) 79 80 81 __citation__ = [ 82 "https://doi.org/10.5281/zenodo.1238132", 83 "https://doi.org/10.5281/zenodo.3235817", 84 ] 85 [end of plasmapy/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py --- a/plasmapy/__init__.py +++ b/plasmapy/__init__.py @@ -50,8 +50,13 @@ __name__ = "plasmapy" +__citation__ = ( + "Instructions on how to cite and acknowledge PlasmaPy are provided in the " + "online documentation at: http://docs.plasmapy.org/en/latest/about/citation.html" +) + if sys.version_info < tuple((int(val) for val in "3.6".split('.'))): - raise Exception("plasmapy does not support Python < {}".format(3.6)) + raise Exception("PlasmaPy does not support Python < {}".format(3.6)) def online_help(query): @@ -76,9 +81,3 @@ url = 'http://docs.astropy.org/en/stable/units/' webbrowser.open(url) - - -__citation__ = [ - "https://doi.org/10.5281/zenodo.1238132", - "https://doi.org/10.5281/zenodo.3235817", -]
{"golden_diff": "diff --git a/plasmapy/__init__.py b/plasmapy/__init__.py\n--- a/plasmapy/__init__.py\n+++ b/plasmapy/__init__.py\n@@ -50,8 +50,13 @@\n \n __name__ = \"plasmapy\"\n \n+__citation__ = (\n+ \"Instructions on how to cite and acknowledge PlasmaPy are provided in the \"\n+ \"online documentation at: http://docs.plasmapy.org/en/latest/about/citation.html\"\n+)\n+\n if sys.version_info < tuple((int(val) for val in \"3.6\".split('.'))):\n- raise Exception(\"plasmapy does not support Python < {}\".format(3.6))\n+ raise Exception(\"PlasmaPy does not support Python < {}\".format(3.6))\n \n \n def online_help(query):\n@@ -76,9 +81,3 @@\n url = 'http://docs.astropy.org/en/stable/units/'\n \n webbrowser.open(url)\n-\n-\n-__citation__ = [\n- \"https://doi.org/10.5281/zenodo.1238132\",\n- \"https://doi.org/10.5281/zenodo.3235817\",\n-]\n", "issue": "Update acknowledgements to include NSF CSSI grant\nGood news, everyone! The NSF CSSI proposal that we submitted has officially been awarded! At the request of NSF program manager Slava Lukin, we should update our acknowledgements to include this grant and reflect the support from NSF. I'll assign myself to do this when the grant officially starts (which appears to have been moved up to October 1, 2019). The things that we need to do include:\r\n\r\n - [x] Revise the sentence that we ask people to include in the acknowledgements section of their papers to reflect funding from NSF \r\n - [x] Add an acknowledgements section at the end of `README.md`\r\n - [x] Update `CITATION.md`\r\n - [x] Update `docs/about/credits.rst`\r\n - [x] Update `plasmapy.__citation__`\r\n\r\nAnother thing peripherally related to this issue is to:\r\n\r\n - [x] Update the [proposal](https://doi.org/10.5281/zenodo.2633286) that we archived on Zenodo to include the answers to questions we had to send in and the panel's comments\r\n\r\nI'll assign this to myself. 
\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nPlasmaPy: A plasma physics Python package\n================================================\n\nDocumentation is available in the docstrings,\nonline at https://docs.plasmapy.org (accessible also using\nthe ``plasmapy.online_help`` function).\n\nContents\n--------\nPlasmaPy provides the following functionality:\n\nSubpackages\n-----------\nEach of these subpackages requires an explicit import, for example,\nvia ``import plasmapy.physics``.\n\n::\n\n atomic --- Database for atoms, isotopes, ions...\n classes --- (WIP) classes used in multiple places\n data --- Data used for testing and examples\n diagnostics --- Experimental research data analysis\n mathematics --- General formulae used elsewhere\n physics --- Plasma theory functionality\n transport --- Transport theory functionality\n utils --- Various utilities\n\nUtility tools\n-------------\n::\n\n test --- Run PlasmaPy unit tests\n online_help --- Search the online documentation\n __version__ --- PlasmaPy version string\n __citation__ --- PlasmaPy citation template\n\n\"\"\"\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n# Packages may add whatever they like to this file, but\n# should keep this content at the top.\n# ----------------------------------------------------------------------------\nfrom ._base_init import *\n# ----------------------------------------------------------------------------\n\n# Enforce Python version check during package import.\n# This is the same check as the one at the top of setup.py\nimport sys\n\n__name__ = \"plasmapy\"\n\nif sys.version_info < tuple((int(val) for val in \"3.6\".split('.'))):\n raise Exception(\"plasmapy does not support Python < {}\".format(3.6))\n\n\ndef online_help(query):\n \"\"\"\n Search the online PlasmaPy documentation for the given query from plasmapy.org\n Opens the results in the default web browser.\n Requires an active Internet connection.\n Redirects to Astropy.units in case of query 'unit' or 'units'\n\n Parameters\n ----------\n query : str\n The search query.\n \"\"\"\n from urllib.parse import urlencode\n import webbrowser\n\n url = ('http://docs.plasmapy.org/en/stable/search.html?'\n '{0}&check_keywords=yes&area=default').format(urlencode({'q': query}))\n\n if(query.lower() in ('unit', 'units')):\n url = 'http://docs.astropy.org/en/stable/units/'\n\n webbrowser.open(url)\n\n\n__citation__ = [\n \"https://doi.org/10.5281/zenodo.1238132\",\n \"https://doi.org/10.5281/zenodo.3235817\",\n]\n", "path": "plasmapy/__init__.py"}]}
1,547
284
gh_patches_debug_26065
rasdani/github-patches
git_diff
iterative__dvc-2743
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dvc: .dvcignore trouble with nfs mounted directory I have a large NFS mounted in a directory that I would like dvc to ignore. Directory Structure: ``` directory |___nfs |___... |___.dvc |___.dvcignore ``` My *.dvcignore* has the following line: `/nfs/` (I've tried `nfs/` and `nfs/*`) The problem is that when I run `dvc status` or `dvc pull` the processes will just hang: ``` DEBUG: PRAGMA user_version; DEBUG: fetched: [(3,)] DEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL) DEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER) DEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL) DEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info) DEBUG: PRAGMA user_version = 3; ``` Here is the traceback from `KeyboardInterrupt`: ``` File "/home/ec2-user/app/proc/.env/lib/python3.7/site-packages/dvc/repo/__init__.py", line 499, in dvcignore return DvcIgnoreFilter(self.root_dir) File "/home/ec2-user/app/proc/.env/lib/python3.7/site-packages/dvc/ignore.py", line 67, in __init__ for root, dirs, _ in os.walk(root_dir): File "/home/ec2-user/app/proc/.env/lib64/python3.7/os.py", line 410, in walk yield from walk(new_path, topdown, onerror, followlinks) File "/home/ec2-user/app/proc/.env/lib64/python3.7/os.py", line 368, in walk is_dir = entry.is_dir() ``` Which makes me feel like the directory is not being ignored. ***Additonal*** I've unmounted the NFS directory and ran `dvc status` with no problem so I believe the issue stems from dvc trying to traverse it. 
System Information: ``` DVC version: 0.66.6 Python version: 3.7.4 Platform: Linux 4.14.109-99.92.amzn2.x86_64 Installation: pip ``` </issue> <code> [start of dvc/ignore.py] 1 from __future__ import unicode_literals 2 3 import logging 4 import os 5 6 from pathspec import PathSpec 7 from pathspec.patterns import GitWildMatchPattern 8 9 from dvc.utils import relpath 10 from dvc.utils.compat import open 11 12 logger = logging.getLogger(__name__) 13 14 15 class DvcIgnore(object): 16 DVCIGNORE_FILE = ".dvcignore" 17 18 def __call__(self, root, dirs, files): 19 raise NotImplementedError 20 21 22 class DvcIgnorePatterns(DvcIgnore): 23 def __init__(self, ignore_file_path): 24 assert os.path.isabs(ignore_file_path) 25 26 self.ignore_file_path = ignore_file_path 27 self.dirname = os.path.normpath(os.path.dirname(ignore_file_path)) 28 29 with open(ignore_file_path, encoding="utf-8") as fobj: 30 self.ignore_spec = PathSpec.from_lines(GitWildMatchPattern, fobj) 31 32 def __call__(self, root, dirs, files): 33 files = [f for f in files if not self.matches(root, f)] 34 dirs = [d for d in dirs if not self.matches(root, d)] 35 36 return dirs, files 37 38 def matches(self, dirname, basename): 39 abs_path = os.path.join(dirname, basename) 40 rel_path = relpath(abs_path, self.dirname) 41 42 if os.pardir + os.sep in rel_path: 43 return False 44 return self.ignore_spec.match_file(rel_path) 45 46 def __hash__(self): 47 return hash(self.ignore_file_path) 48 49 def __eq__(self, other): 50 return self.ignore_file_path == other.ignore_file_path 51 52 53 class DvcIgnoreDirs(DvcIgnore): 54 def __init__(self, basenames): 55 self.basenames = set(basenames) 56 57 def __call__(self, root, dirs, files): 58 dirs = [d for d in dirs if d not in self.basenames] 59 60 return dirs, files 61 62 63 class DvcIgnoreFilter(object): 64 def __init__(self, root_dir): 65 self.ignores = {DvcIgnoreDirs([".git", ".hg", ".dvc"])} 66 self._update(root_dir) 67 for root, dirs, _ in os.walk(root_dir): 68 for d in dirs: 69 self._update(os.path.join(root, d)) 70 71 def _update(self, dirname): 72 ignore_file_path = os.path.join(dirname, DvcIgnore.DVCIGNORE_FILE) 73 if os.path.exists(ignore_file_path): 74 self.ignores.add(DvcIgnorePatterns(ignore_file_path)) 75 76 def __call__(self, root, dirs, files): 77 for ignore in self.ignores: 78 dirs, files = ignore(root, dirs, files) 79 80 return dirs, files 81 [end of dvc/ignore.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/ignore.py b/dvc/ignore.py --- a/dvc/ignore.py +++ b/dvc/ignore.py @@ -6,6 +6,7 @@ from pathspec import PathSpec from pathspec.patterns import GitWildMatchPattern +from dvc.utils import dvc_walk from dvc.utils import relpath from dvc.utils.compat import open @@ -47,6 +48,9 @@ return hash(self.ignore_file_path) def __eq__(self, other): + if not isinstance(other, DvcIgnorePatterns): + return NotImplemented + return self.ignore_file_path == other.ignore_file_path @@ -59,12 +63,21 @@ return dirs, files + def __hash__(self): + return hash(tuple(self.basenames)) + + def __eq__(self, other): + if not isinstance(other, DvcIgnoreDirs): + return NotImplemented + + return self.basenames == other.basenames + class DvcIgnoreFilter(object): def __init__(self, root_dir): self.ignores = {DvcIgnoreDirs([".git", ".hg", ".dvc"])} self._update(root_dir) - for root, dirs, _ in os.walk(root_dir): + for root, dirs, _ in dvc_walk(root_dir, self): for d in dirs: self._update(os.path.join(root, d))
{"golden_diff": "diff --git a/dvc/ignore.py b/dvc/ignore.py\n--- a/dvc/ignore.py\n+++ b/dvc/ignore.py\n@@ -6,6 +6,7 @@\n from pathspec import PathSpec\n from pathspec.patterns import GitWildMatchPattern\n \n+from dvc.utils import dvc_walk\n from dvc.utils import relpath\n from dvc.utils.compat import open\n \n@@ -47,6 +48,9 @@\n return hash(self.ignore_file_path)\n \n def __eq__(self, other):\n+ if not isinstance(other, DvcIgnorePatterns):\n+ return NotImplemented\n+\n return self.ignore_file_path == other.ignore_file_path\n \n \n@@ -59,12 +63,21 @@\n \n return dirs, files\n \n+ def __hash__(self):\n+ return hash(tuple(self.basenames))\n+\n+ def __eq__(self, other):\n+ if not isinstance(other, DvcIgnoreDirs):\n+ return NotImplemented\n+\n+ return self.basenames == other.basenames\n+\n \n class DvcIgnoreFilter(object):\n def __init__(self, root_dir):\n self.ignores = {DvcIgnoreDirs([\".git\", \".hg\", \".dvc\"])}\n self._update(root_dir)\n- for root, dirs, _ in os.walk(root_dir):\n+ for root, dirs, _ in dvc_walk(root_dir, self):\n for d in dirs:\n self._update(os.path.join(root, d))\n", "issue": "dvc: .dvcignore trouble with nfs mounted directory\nI have a large NFS mounted in a directory that I would like dvc to ignore. \r\n\r\nDirectory Structure:\r\n```\r\ndirectory\r\n|___nfs\r\n|___...\r\n|___.dvc\r\n|___.dvcignore\r\n```\r\nMy *.dvcignore* has the following line:\r\n`/nfs/` (I've tried `nfs/` and `nfs/*`)\r\n\r\nThe problem is that when I run `dvc status` or `dvc pull` the processes will just hang:\r\n```\r\nDEBUG: PRAGMA user_version;\r\nDEBUG: fetched: [(3,)]\r\nDEBUG: CREATE TABLE IF NOT EXISTS state (inode INTEGER PRIMARY KEY, mtime TEXT NOT NULL, size TEXT NOT NULL, md5 TEXT NOT NULL, timestamp TEXT NOT NULL)\r\nDEBUG: CREATE TABLE IF NOT EXISTS state_info (count INTEGER)\r\nDEBUG: CREATE TABLE IF NOT EXISTS link_state (path TEXT PRIMARY KEY, inode INTEGER NOT NULL, mtime TEXT NOT NULL)\r\nDEBUG: INSERT OR IGNORE INTO state_info (count) SELECT 0 WHERE NOT EXISTS (SELECT * FROM state_info)\r\nDEBUG: PRAGMA user_version = 3; \r\n```\r\n\r\nHere is the traceback from `KeyboardInterrupt`:\r\n```\r\n File \"/home/ec2-user/app/proc/.env/lib/python3.7/site-packages/dvc/repo/__init__.py\", line 499, in dvcignore\r\n return DvcIgnoreFilter(self.root_dir)\r\n File \"/home/ec2-user/app/proc/.env/lib/python3.7/site-packages/dvc/ignore.py\", line 67, in __init__\r\n for root, dirs, _ in os.walk(root_dir):\r\n File \"/home/ec2-user/app/proc/.env/lib64/python3.7/os.py\", line 410, in walk\r\n yield from walk(new_path, topdown, onerror, followlinks)\r\n File \"/home/ec2-user/app/proc/.env/lib64/python3.7/os.py\", line 368, in walk\r\n is_dir = entry.is_dir() \r\n```\r\nWhich makes me feel like the directory is not being ignored.\r\n\r\n***Additonal***\r\nI've unmounted the NFS directory and ran `dvc status` with no problem so I believe the issue stems from dvc trying to traverse it.\r\n\r\nSystem Information:\r\n``` \r\nDVC version: 0.66.6\r\nPython version: 3.7.4\r\nPlatform: Linux 4.14.109-99.92.amzn2.x86_64\r\nInstallation: pip\r\n```\r\n\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport os\n\nfrom pathspec import PathSpec\nfrom pathspec.patterns import GitWildMatchPattern\n\nfrom dvc.utils import relpath\nfrom dvc.utils.compat import open\n\nlogger = logging.getLogger(__name__)\n\n\nclass DvcIgnore(object):\n DVCIGNORE_FILE = \".dvcignore\"\n\n def __call__(self, root, dirs, files):\n raise NotImplementedError\n\n\nclass 
DvcIgnorePatterns(DvcIgnore):\n def __init__(self, ignore_file_path):\n assert os.path.isabs(ignore_file_path)\n\n self.ignore_file_path = ignore_file_path\n self.dirname = os.path.normpath(os.path.dirname(ignore_file_path))\n\n with open(ignore_file_path, encoding=\"utf-8\") as fobj:\n self.ignore_spec = PathSpec.from_lines(GitWildMatchPattern, fobj)\n\n def __call__(self, root, dirs, files):\n files = [f for f in files if not self.matches(root, f)]\n dirs = [d for d in dirs if not self.matches(root, d)]\n\n return dirs, files\n\n def matches(self, dirname, basename):\n abs_path = os.path.join(dirname, basename)\n rel_path = relpath(abs_path, self.dirname)\n\n if os.pardir + os.sep in rel_path:\n return False\n return self.ignore_spec.match_file(rel_path)\n\n def __hash__(self):\n return hash(self.ignore_file_path)\n\n def __eq__(self, other):\n return self.ignore_file_path == other.ignore_file_path\n\n\nclass DvcIgnoreDirs(DvcIgnore):\n def __init__(self, basenames):\n self.basenames = set(basenames)\n\n def __call__(self, root, dirs, files):\n dirs = [d for d in dirs if d not in self.basenames]\n\n return dirs, files\n\n\nclass DvcIgnoreFilter(object):\n def __init__(self, root_dir):\n self.ignores = {DvcIgnoreDirs([\".git\", \".hg\", \".dvc\"])}\n self._update(root_dir)\n for root, dirs, _ in os.walk(root_dir):\n for d in dirs:\n self._update(os.path.join(root, d))\n\n def _update(self, dirname):\n ignore_file_path = os.path.join(dirname, DvcIgnore.DVCIGNORE_FILE)\n if os.path.exists(ignore_file_path):\n self.ignores.add(DvcIgnorePatterns(ignore_file_path))\n\n def __call__(self, root, dirs, files):\n for ignore in self.ignores:\n dirs, files = ignore(root, dirs, files)\n\n return dirs, files\n", "path": "dvc/ignore.py"}]}
1,831
325
gh_patches_debug_16139
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-1922
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> E1017 with nested !Select *cfn-lint version: (`cfn-lint --version`)* 0.46.0 *Description of issue.* When linting this CFT YAML: ```yaml subnet0A: DependsOn: ipv6CidrBlock Type: 'AWS::EC2::Subnet' Properties: VpcId: !Ref vpc CidrBlock: !Select - !Select - 1 - !Split - ',' - !FindInMap - subnetMap - !Ref numAzs - !Ref numSubnets - 'Fn::Cidr': - !GetAtt - vpc - CidrBlock - !Select - 0 - !Split - ',' - !FindInMap - subnetMap - !Ref numAzs - !Ref numSubnets - !FindInMap - subnetMap - maskTocidrBits - !Ref subnetMask ``` I get this error: E1017 Select index should be an Integer or a function Ref or FindInMap for Resources/subnet0A/Properties/CidrBlock/Fn::Select Template works fine. thanks Cfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`) </issue> <code> [start of src/cfnlint/rules/functions/Select.py] 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 import six 6 from cfnlint.rules import CloudFormationLintRule 7 from cfnlint.rules import RuleMatch 8 9 10 class Select(CloudFormationLintRule): 11 """Check if Select values are correct""" 12 id = 'E1017' 13 shortdesc = 'Select validation of parameters' 14 description = 'Making sure the function not is of list' 15 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-select.html' 16 tags = ['functions', 'select'] 17 18 def match(self, cfn): 19 matches = [] 20 21 select_objs = cfn.search_deep_keys('Fn::Select') 22 23 supported_functions = [ 24 'Fn::FindInMap', 25 'Fn::GetAtt', 26 'Fn::GetAZs', 27 'Fn::If', 28 'Fn::Split', 29 'Fn::Cidr', 30 'Ref' 31 ] 32 33 for select_obj in select_objs: 34 select_value_obj = select_obj[-1] 35 tree = select_obj[:-1] 36 if isinstance(select_value_obj, list): 37 if len(select_value_obj) == 2: 38 index_obj = select_value_obj[0] 39 list_of_objs = select_value_obj[1] 40 if isinstance(index_obj, dict): 41 if len(index_obj) == 1: 42 for index_key, _ in index_obj.items(): 43 if index_key not in ['Ref', 'Fn::FindInMap']: 44 message = 'Select index should be an Integer or a function Ref or FindInMap for {0}' 45 matches.append(RuleMatch( 46 tree, message.format('/'.join(map(str, tree))))) 47 elif not isinstance(index_obj, six.integer_types): 48 try: 49 int(index_obj) 50 except ValueError: 51 message = 'Select index should be an Integer or a function of Ref or FindInMap for {0}' 52 matches.append(RuleMatch( 53 tree, message.format('/'.join(map(str, tree))))) 54 if isinstance(list_of_objs, dict): 55 if len(list_of_objs) == 1: 56 for key, _ in list_of_objs.items(): 57 if key not in supported_functions: 58 message = 'Select should use a supported function of {0}' 59 matches.append(RuleMatch( 60 tree, message.format(', '.join(map(str, supported_functions))))) 61 else: 62 message = 'Select should use a supported function of {0}' 63 matches.append(RuleMatch( 64 tree, message.format(', '.join(map(str, supported_functions))))) 65 elif not isinstance(list_of_objs, list): 66 message = 'Select should be an array of values for {0}' 67 matches.append(RuleMatch( 68 tree, message.format('/'.join(map(str, tree))))) 69 else: 70 
message = 'Select should be a list of 2 elements for {0}' 71 matches.append(RuleMatch( 72 tree, message.format('/'.join(map(str, tree))))) 73 else: 74 message = 'Select should be a list of 2 elements for {0}' 75 matches.append(RuleMatch( 76 tree, message.format('/'.join(map(str, tree))))) 77 return matches 78 [end of src/cfnlint/rules/functions/Select.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/functions/Select.py b/src/cfnlint/rules/functions/Select.py --- a/src/cfnlint/rules/functions/Select.py +++ b/src/cfnlint/rules/functions/Select.py @@ -40,7 +40,7 @@ if isinstance(index_obj, dict): if len(index_obj) == 1: for index_key, _ in index_obj.items(): - if index_key not in ['Ref', 'Fn::FindInMap']: + if index_key not in ['Ref', 'Fn::FindInMap', 'Fn::Select']: message = 'Select index should be an Integer or a function Ref or FindInMap for {0}' matches.append(RuleMatch( tree, message.format('/'.join(map(str, tree)))))
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/Select.py b/src/cfnlint/rules/functions/Select.py\n--- a/src/cfnlint/rules/functions/Select.py\n+++ b/src/cfnlint/rules/functions/Select.py\n@@ -40,7 +40,7 @@\n if isinstance(index_obj, dict):\n if len(index_obj) == 1:\n for index_key, _ in index_obj.items():\n- if index_key not in ['Ref', 'Fn::FindInMap']:\n+ if index_key not in ['Ref', 'Fn::FindInMap', 'Fn::Select']:\n message = 'Select index should be an Integer or a function Ref or FindInMap for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n", "issue": "E1017 with nested !Select\n*cfn-lint version: (`cfn-lint --version`)*\r\n0.46.0\r\n\r\n*Description of issue.*\r\nWhen linting this CFT YAML:\r\n```yaml\r\nsubnet0A:\r\n DependsOn: ipv6CidrBlock\r\n Type: 'AWS::EC2::Subnet'\r\n Properties:\r\n VpcId: !Ref vpc\r\n CidrBlock: !Select\r\n - !Select\r\n - 1\r\n - !Split\r\n - ','\r\n - !FindInMap\r\n - subnetMap\r\n - !Ref numAzs\r\n - !Ref numSubnets\r\n - 'Fn::Cidr':\r\n - !GetAtt\r\n - vpc\r\n - CidrBlock\r\n - !Select\r\n - 0\r\n - !Split\r\n - ','\r\n - !FindInMap\r\n - subnetMap\r\n - !Ref numAzs\r\n - !Ref numSubnets\r\n - !FindInMap\r\n - subnetMap\r\n - maskTocidrBits\r\n - !Ref subnetMask\r\n```\r\n\r\nI get this error:\r\n\r\nE1017 Select index should be an Integer or a function Ref or FindInMap for Resources/subnet0A/Properties/CidrBlock/Fn::Select\r\n\r\nTemplate works fine.\r\nthanks\r\n\r\nCfn-lint uses the [CloudFormation Resource Specifications](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-resource-specification.html) as the base to do validation. These files are included as part of the application version. Please update to the latest version of `cfn-lint` or update the spec files manually (`cfn-lint -u`)\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport six\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass Select(CloudFormationLintRule):\n \"\"\"Check if Select values are correct\"\"\"\n id = 'E1017'\n shortdesc = 'Select validation of parameters'\n description = 'Making sure the function not is of list'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-select.html'\n tags = ['functions', 'select']\n\n def match(self, cfn):\n matches = []\n\n select_objs = cfn.search_deep_keys('Fn::Select')\n\n supported_functions = [\n 'Fn::FindInMap',\n 'Fn::GetAtt',\n 'Fn::GetAZs',\n 'Fn::If',\n 'Fn::Split',\n 'Fn::Cidr',\n 'Ref'\n ]\n\n for select_obj in select_objs:\n select_value_obj = select_obj[-1]\n tree = select_obj[:-1]\n if isinstance(select_value_obj, list):\n if len(select_value_obj) == 2:\n index_obj = select_value_obj[0]\n list_of_objs = select_value_obj[1]\n if isinstance(index_obj, dict):\n if len(index_obj) == 1:\n for index_key, _ in index_obj.items():\n if index_key not in ['Ref', 'Fn::FindInMap']:\n message = 'Select index should be an Integer or a function Ref or FindInMap for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n elif not isinstance(index_obj, six.integer_types):\n try:\n int(index_obj)\n except ValueError:\n message = 'Select index should be an Integer or a function of Ref or FindInMap for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n if isinstance(list_of_objs, dict):\n if len(list_of_objs) == 1:\n for key, _ in list_of_objs.items():\n if key not in supported_functions:\n message = 'Select should use a supported function of {0}'\n matches.append(RuleMatch(\n tree, message.format(', '.join(map(str, supported_functions)))))\n else:\n message = 'Select should use a supported function of {0}'\n matches.append(RuleMatch(\n tree, message.format(', '.join(map(str, supported_functions)))))\n elif not isinstance(list_of_objs, list):\n message = 'Select should be an array of values for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Select should be a list of 2 elements for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n else:\n message = 'Select should be a list of 2 elements for {0}'\n matches.append(RuleMatch(\n tree, message.format('/'.join(map(str, tree)))))\n return matches\n", "path": "src/cfnlint/rules/functions/Select.py"}]}
1,744
170
gh_patches_debug_3665
rasdani/github-patches
git_diff
pex-tool__pex-634
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bootstrap demotion is not robust While working https://github.com/pantsbuild/pants/issues/6927 it was discovered that there were multiple `.bootstrap/` on the sys.path and only one was demoted, leading to pants, when running in a pex, picking .bootstrap.pex instead of the pex dist it in `.deps/`. In this case, Pants was purposefully duplicating pex path entries to work around a bug in the `coverage` library. PEX should be robust and demote all instances of it's `.bootstrap/` entry on the sys.path. </issue> <code> [start of pex/bootstrap.py] 1 # coding=utf-8 2 # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md). 3 # Licensed under the Apache License, Version 2.0 (see LICENSE). 4 5 import os 6 7 8 class Bootstrap(object): 9 """Supports introspection of the PEX bootstrap code.""" 10 11 _INSTANCE = None 12 13 @classmethod 14 def locate(cls): 15 """Locates the active PEX bootstrap. 16 17 :rtype: :class:`Bootstrap` 18 """ 19 if cls._INSTANCE is None: 20 bootstrap_path = __file__ 21 module_import_path = __name__.split('.') 22 23 # For example, our __file__ might be requests.pex/.bootstrap/pex/bootstrap.pyc and our import 24 # path pex.bootstrap; so we walk back through all the module components of our import path to 25 # find the base sys.path entry where we were found (requests.pex/.bootstrap in this example). 26 for _ in module_import_path: 27 bootstrap_path = os.path.dirname(bootstrap_path) 28 29 cls._INSTANCE = cls(sys_path_entry=bootstrap_path) 30 return cls._INSTANCE 31 32 def __init__(self, sys_path_entry): 33 self._sys_path_entry = sys_path_entry 34 self._realpath = os.path.realpath(self._sys_path_entry) 35 36 def demote(self): 37 """Demote the bootstrap code to the end of the `sys.path` so it is found last. 38 39 :return: The list of un-imported bootstrap modules. 40 :rtype: list of :class:`types.ModuleType` 41 """ 42 import sys # Grab a hold of `sys` early since we'll be un-importing our module in this process. 43 44 unimported_modules = [] 45 for name, module in reversed(sorted(sys.modules.items())): 46 if self.imported_from_bootstrap(module): 47 unimported_modules.append(sys.modules.pop(name)) 48 49 sys.path.remove(self._sys_path_entry) 50 sys.path.append(self._sys_path_entry) 51 52 return unimported_modules 53 54 def imported_from_bootstrap(self, module): 55 """Return ``True`` if the given ``module`` object was imported from bootstrap code. 56 57 :param module: The module to check the provenance of. 58 :type module: :class:`types.ModuleType` 59 :rtype: bool 60 """ 61 62 # A vendored module. 63 path = getattr(module, '__file__', None) 64 if path and os.path.realpath(path).startswith(self._realpath): 65 return True 66 67 # A vendored package. 68 path = getattr(module, '__path__', None) 69 if path and any(os.path.realpath(path_item).startswith(self._realpath) 70 for path_item in path): 71 return True 72 73 return False 74 75 def __repr__(self): 76 return '{cls}(sys_path_entry={sys_path_entry!r})'.format(cls=type(self).__name__, 77 sys_path_entry=self._sys_path_entry) 78 [end of pex/bootstrap.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/bootstrap.py b/pex/bootstrap.py --- a/pex/bootstrap.py +++ b/pex/bootstrap.py @@ -46,7 +46,7 @@ if self.imported_from_bootstrap(module): unimported_modules.append(sys.modules.pop(name)) - sys.path.remove(self._sys_path_entry) + sys.path[:] = [path for path in sys.path if os.path.realpath(path) != self._realpath] sys.path.append(self._sys_path_entry) return unimported_modules
{"golden_diff": "diff --git a/pex/bootstrap.py b/pex/bootstrap.py\n--- a/pex/bootstrap.py\n+++ b/pex/bootstrap.py\n@@ -46,7 +46,7 @@\n if self.imported_from_bootstrap(module):\n unimported_modules.append(sys.modules.pop(name))\n \n- sys.path.remove(self._sys_path_entry)\n+ sys.path[:] = [path for path in sys.path if os.path.realpath(path) != self._realpath]\n sys.path.append(self._sys_path_entry)\n \n return unimported_modules\n", "issue": "Bootstrap demotion is not robust\nWhile working https://github.com/pantsbuild/pants/issues/6927 it was discovered that there were multiple `.bootstrap/` on the sys.path and only one was demoted, leading to pants, when running in a pex, picking .bootstrap.pex instead of the pex dist it in `.deps/`. In this case, Pants was purposefully duplicating pex path entries to work around a bug in the `coverage` library. PEX should be robust and demote all instances of it's `.bootstrap/` entry on the sys.path.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\n\n\nclass Bootstrap(object):\n \"\"\"Supports introspection of the PEX bootstrap code.\"\"\"\n\n _INSTANCE = None\n\n @classmethod\n def locate(cls):\n \"\"\"Locates the active PEX bootstrap.\n\n :rtype: :class:`Bootstrap`\n \"\"\"\n if cls._INSTANCE is None:\n bootstrap_path = __file__\n module_import_path = __name__.split('.')\n\n # For example, our __file__ might be requests.pex/.bootstrap/pex/bootstrap.pyc and our import\n # path pex.bootstrap; so we walk back through all the module components of our import path to\n # find the base sys.path entry where we were found (requests.pex/.bootstrap in this example).\n for _ in module_import_path:\n bootstrap_path = os.path.dirname(bootstrap_path)\n\n cls._INSTANCE = cls(sys_path_entry=bootstrap_path)\n return cls._INSTANCE\n\n def __init__(self, sys_path_entry):\n self._sys_path_entry = sys_path_entry\n self._realpath = os.path.realpath(self._sys_path_entry)\n\n def demote(self):\n \"\"\"Demote the bootstrap code to the end of the `sys.path` so it is found last.\n\n :return: The list of un-imported bootstrap modules.\n :rtype: list of :class:`types.ModuleType`\n \"\"\"\n import sys # Grab a hold of `sys` early since we'll be un-importing our module in this process.\n\n unimported_modules = []\n for name, module in reversed(sorted(sys.modules.items())):\n if self.imported_from_bootstrap(module):\n unimported_modules.append(sys.modules.pop(name))\n\n sys.path.remove(self._sys_path_entry)\n sys.path.append(self._sys_path_entry)\n\n return unimported_modules\n\n def imported_from_bootstrap(self, module):\n \"\"\"Return ``True`` if the given ``module`` object was imported from bootstrap code.\n\n :param module: The module to check the provenance of.\n :type module: :class:`types.ModuleType`\n :rtype: bool\n \"\"\"\n\n # A vendored module.\n path = getattr(module, '__file__', None)\n if path and os.path.realpath(path).startswith(self._realpath):\n return True\n\n # A vendored package.\n path = getattr(module, '__path__', None)\n if path and any(os.path.realpath(path_item).startswith(self._realpath)\n for path_item in path):\n return True\n\n return False\n\n def __repr__(self):\n return '{cls}(sys_path_entry={sys_path_entry!r})'.format(cls=type(self).__name__,\n sys_path_entry=self._sys_path_entry)\n", "path": "pex/bootstrap.py"}]}
1,424
115
gh_patches_debug_32954
rasdani/github-patches
git_diff
liqd__a4-meinberlin-2442
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> in text commenting links to first chapter lead back to the project view ...so I am back to the timeline and the tiles There are two links doing that: "back to chapter" in paragraph detail view and "previous chapter" in second chapter. </issue> <code> [start of meinberlin/apps/documents/views.py] 1 from django.http import Http404 2 from django.urls import reverse 3 from django.utils.translation import ugettext_lazy as _ 4 from django.views import generic 5 6 from adhocracy4.dashboard import mixins as dashboard_mixins 7 from adhocracy4.projects.mixins import DisplayProjectOrModuleMixin 8 from adhocracy4.projects.mixins import ProjectMixin 9 from adhocracy4.rules import mixins as rules_mixins 10 from meinberlin.apps.exports.views import DashboardExportView 11 12 from . import models 13 14 15 class DocumentDashboardView(ProjectMixin, 16 dashboard_mixins.DashboardBaseMixin, 17 dashboard_mixins.DashboardComponentMixin, 18 generic.TemplateView): 19 template_name = 'meinberlin_documents/document_dashboard.html' 20 permission_required = 'a4projects.change_project' 21 22 def get_permission_object(self): 23 return self.project 24 25 26 class ChapterDetailView(ProjectMixin, 27 rules_mixins.PermissionRequiredMixin, 28 generic.DetailView, 29 DisplayProjectOrModuleMixin): 30 model = models.Chapter 31 permission_required = 'meinberlin_documents.view_chapter' 32 get_context_from_object = True 33 34 def get_context_data(self, **kwargs): 35 context = super(ChapterDetailView, self).get_context_data(**kwargs) 36 context['chapter_list'] = self.chapter_list 37 return context 38 39 @property 40 def chapter_list(self): 41 return models.Chapter.objects.filter(module=self.module) 42 43 44 class DocumentDetailView(ChapterDetailView): 45 get_context_from_object = False 46 47 def get_object(self): 48 first_chapter = models.Chapter.objects \ 49 .filter(module=self.module) \ 50 .first() 51 52 if not first_chapter: 53 raise Http404(_('Document has no chapters defined.')) 54 return first_chapter 55 56 57 class ParagraphDetailView(ProjectMixin, 58 rules_mixins.PermissionRequiredMixin, 59 generic.DetailView): 60 model = models.Paragraph 61 permission_required = 'meinberlin_documents.view_paragraph' 62 63 64 class DocumentDashboardExportView(DashboardExportView): 65 template_name = 'meinberlin_exports/export_dashboard.html' 66 67 def get_context_data(self, **kwargs): 68 context = super().get_context_data(**kwargs) 69 context['comment_export'] = reverse( 70 'a4dashboard:document-comment-export', 71 kwargs={'module_slug': self.module.slug}) 72 return context 73 [end of meinberlin/apps/documents/views.py] [start of meinberlin/apps/documents/models.py] 1 from ckeditor_uploader.fields import RichTextUploadingField 2 from django.contrib.contenttypes.fields import GenericRelation 3 from django.db import models 4 from django.urls import reverse 5 from django.utils.functional import cached_property 6 7 from adhocracy4 import transforms 8 from adhocracy4.comments import models as comment_models 9 from adhocracy4.models import base 10 from adhocracy4.modules import models as module_models 11 12 13 class Chapter(module_models.Item): 14 name = models.CharField(max_length=120) 15 comments = GenericRelation(comment_models.Comment, 16 related_query_name='chapter', 17 object_id_field='object_pk') 18 weight = models.PositiveIntegerField(default=0) 19 20 class Meta: 21 ordering = ('weight',) 22 23 def __str__(self): 24 return 
"{}_chapter_{}".format(str(self.module), self.pk) 25 26 def get_absolute_url(self): 27 return reverse('meinberlin_documents:chapter-detail', 28 args=[str(self.pk)]) 29 30 @cached_property 31 def prev(self): 32 return Chapter.objects\ 33 .filter(module=self.module)\ 34 .filter(weight__lt=self.weight)\ 35 .order_by('-weight')\ 36 .first() 37 38 @cached_property 39 def next(self): 40 return Chapter.objects\ 41 .filter(module=self.module)\ 42 .filter(weight__gt=self.weight)\ 43 .order_by('weight')\ 44 .first() 45 46 47 class Paragraph(base.TimeStampedModel): 48 name = models.CharField(max_length=120, blank=True) 49 text = RichTextUploadingField(config_name='image-editor') 50 weight = models.PositiveIntegerField() 51 chapter = models.ForeignKey(Chapter, 52 on_delete=models.CASCADE, 53 related_name='paragraphs') 54 comments = GenericRelation(comment_models.Comment, 55 related_query_name='paragraph', 56 object_id_field='object_pk') 57 58 class Meta: 59 ordering = ('weight',) 60 61 def __str__(self): 62 return "{}_paragraph_{}".format(str(self.chapter), self.weight) 63 64 def save(self, *args, **kwargs): 65 self.text = transforms.clean_html_field( 66 self.text, 'image-editor') 67 super().save(*args, **kwargs) 68 69 def get_absolute_url(self): 70 return reverse('meinberlin_documents:paragraph-detail', 71 args=[str(self.pk)]) 72 73 @property 74 def creator(self): 75 return self.chapter.creator 76 77 @property 78 def project(self): 79 return self.module.project 80 81 @property 82 def module(self): 83 return self.chapter.module 84 [end of meinberlin/apps/documents/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/documents/models.py b/meinberlin/apps/documents/models.py --- a/meinberlin/apps/documents/models.py +++ b/meinberlin/apps/documents/models.py @@ -24,6 +24,12 @@ return "{}_chapter_{}".format(str(self.module), self.pk) def get_absolute_url(self): + if (self.project.display_timeline + and not self.module.is_in_module_cluster): + return ('{}?initialSlide={}' + .format(reverse('meinberlin_documents:chapter-detail', + args=[str(self.pk)]), + self.module.get_timeline_index)) return reverse('meinberlin_documents:chapter-detail', args=[str(self.pk)]) diff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py --- a/meinberlin/apps/documents/views.py +++ b/meinberlin/apps/documents/views.py @@ -1,5 +1,6 @@ from django.http import Http404 from django.urls import reverse +from django.utils.functional import cached_property from django.utils.translation import ugettext_lazy as _ from django.views import generic @@ -32,7 +33,7 @@ get_context_from_object = True def get_context_data(self, **kwargs): - context = super(ChapterDetailView, self).get_context_data(**kwargs) + context = super().get_context_data(**kwargs) context['chapter_list'] = self.chapter_list return context @@ -40,6 +41,15 @@ def chapter_list(self): return models.Chapter.objects.filter(module=self.module) + @cached_property + def extends(self): + if self.url_name == 'module-detail': + return 'a4modules/module_detail.html' + if self.url_name == 'chapter-detail': + if self.module.is_in_module_cluster: + return 'a4modules/module_detail.html' + return 'a4projects/project_detail.html' + class DocumentDetailView(ChapterDetailView): get_context_from_object = False
{"golden_diff": "diff --git a/meinberlin/apps/documents/models.py b/meinberlin/apps/documents/models.py\n--- a/meinberlin/apps/documents/models.py\n+++ b/meinberlin/apps/documents/models.py\n@@ -24,6 +24,12 @@\n return \"{}_chapter_{}\".format(str(self.module), self.pk)\n \n def get_absolute_url(self):\n+ if (self.project.display_timeline\n+ and not self.module.is_in_module_cluster):\n+ return ('{}?initialSlide={}'\n+ .format(reverse('meinberlin_documents:chapter-detail',\n+ args=[str(self.pk)]),\n+ self.module.get_timeline_index))\n return reverse('meinberlin_documents:chapter-detail',\n args=[str(self.pk)])\n \ndiff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py\n--- a/meinberlin/apps/documents/views.py\n+++ b/meinberlin/apps/documents/views.py\n@@ -1,5 +1,6 @@\n from django.http import Http404\n from django.urls import reverse\n+from django.utils.functional import cached_property\n from django.utils.translation import ugettext_lazy as _\n from django.views import generic\n \n@@ -32,7 +33,7 @@\n get_context_from_object = True\n \n def get_context_data(self, **kwargs):\n- context = super(ChapterDetailView, self).get_context_data(**kwargs)\n+ context = super().get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n \n@@ -40,6 +41,15 @@\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n \n+ @cached_property\n+ def extends(self):\n+ if self.url_name == 'module-detail':\n+ return 'a4modules/module_detail.html'\n+ if self.url_name == 'chapter-detail':\n+ if self.module.is_in_module_cluster:\n+ return 'a4modules/module_detail.html'\n+ return 'a4projects/project_detail.html'\n+\n \n class DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n", "issue": "in text commenting links to first chapter lead back to the project view\n...so I am back to the timeline and the tiles\r\n\r\nThere are two links doing that: \"back to chapter\" in paragraph detail view and \"previous chapter\" in second chapter.\n", "before_files": [{"content": "from django.http import Http404\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.projects.mixins import DisplayProjectOrModuleMixin\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.exports.views import DashboardExportView\n\nfrom . 
import models\n\n\nclass DocumentDashboardView(ProjectMixin,\n dashboard_mixins.DashboardBaseMixin,\n dashboard_mixins.DashboardComponentMixin,\n generic.TemplateView):\n template_name = 'meinberlin_documents/document_dashboard.html'\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.project\n\n\nclass ChapterDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView,\n DisplayProjectOrModuleMixin):\n model = models.Chapter\n permission_required = 'meinberlin_documents.view_chapter'\n get_context_from_object = True\n\n def get_context_data(self, **kwargs):\n context = super(ChapterDetailView, self).get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n\n @property\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n\n\nclass DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n\n def get_object(self):\n first_chapter = models.Chapter.objects \\\n .filter(module=self.module) \\\n .first()\n\n if not first_chapter:\n raise Http404(_('Document has no chapters defined.'))\n return first_chapter\n\n\nclass ParagraphDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView):\n model = models.Paragraph\n permission_required = 'meinberlin_documents.view_paragraph'\n\n\nclass DocumentDashboardExportView(DashboardExportView):\n template_name = 'meinberlin_exports/export_dashboard.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['comment_export'] = reverse(\n 'a4dashboard:document-comment-export',\n kwargs={'module_slug': self.module.slug})\n return context\n", "path": "meinberlin/apps/documents/views.py"}, {"content": "from ckeditor_uploader.fields import RichTextUploadingField\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.comments import models as comment_models\nfrom adhocracy4.models import base\nfrom adhocracy4.modules import models as module_models\n\n\nclass Chapter(module_models.Item):\n name = models.CharField(max_length=120)\n comments = GenericRelation(comment_models.Comment,\n related_query_name='chapter',\n object_id_field='object_pk')\n weight = models.PositiveIntegerField(default=0)\n\n class Meta:\n ordering = ('weight',)\n\n def __str__(self):\n return \"{}_chapter_{}\".format(str(self.module), self.pk)\n\n def get_absolute_url(self):\n return reverse('meinberlin_documents:chapter-detail',\n args=[str(self.pk)])\n\n @cached_property\n def prev(self):\n return Chapter.objects\\\n .filter(module=self.module)\\\n .filter(weight__lt=self.weight)\\\n .order_by('-weight')\\\n .first()\n\n @cached_property\n def next(self):\n return Chapter.objects\\\n .filter(module=self.module)\\\n .filter(weight__gt=self.weight)\\\n .order_by('weight')\\\n .first()\n\n\nclass Paragraph(base.TimeStampedModel):\n name = models.CharField(max_length=120, blank=True)\n text = RichTextUploadingField(config_name='image-editor')\n weight = models.PositiveIntegerField()\n chapter = models.ForeignKey(Chapter,\n on_delete=models.CASCADE,\n related_name='paragraphs')\n comments = GenericRelation(comment_models.Comment,\n related_query_name='paragraph',\n object_id_field='object_pk')\n\n class Meta:\n ordering = ('weight',)\n\n def __str__(self):\n return 
\"{}_paragraph_{}\".format(str(self.chapter), self.weight)\n\n def save(self, *args, **kwargs):\n self.text = transforms.clean_html_field(\n self.text, 'image-editor')\n super().save(*args, **kwargs)\n\n def get_absolute_url(self):\n return reverse('meinberlin_documents:paragraph-detail',\n args=[str(self.pk)])\n\n @property\n def creator(self):\n return self.chapter.creator\n\n @property\n def project(self):\n return self.module.project\n\n @property\n def module(self):\n return self.chapter.module\n", "path": "meinberlin/apps/documents/models.py"}]}
1,923
456
gh_patches_debug_32403
rasdani/github-patches
git_diff
microsoft__hi-ml-504
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Histo dataset mounting script does not show files permanently Files are only visible while the script is running, but disappear once terminated. This could be a consequence of the library updates in #455. </issue> <code> [start of hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py] 1 # ------------------------------------------------------------------------------------------ 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. 4 # ------------------------------------------------------------------------------------------ 5 from pathlib import Path 6 import sys 7 8 himl_histo_root_dir = Path(__file__).parent.parent.parent 9 himl_root = himl_histo_root_dir.parent.parent 10 himl_azure_package_root = himl_root / "hi-ml-azure" / "src" 11 sys.path.insert(0, str(himl_azure_package_root)) 12 13 from health_azure import DatasetConfig # noqa: E402 14 from health_azure.utils import get_workspace # noqa: E402 15 16 17 def mount_dataset(dataset_id: str) -> str: 18 ws = get_workspace() 19 target_folder = "/tmp/datasets/" + dataset_id 20 dataset = DatasetConfig(name=dataset_id, target_folder=target_folder, use_mounting=True) 21 dataset_mount_folder, mount_ctx = dataset.to_input_dataset_local(ws) 22 assert mount_ctx is not None # for mypy 23 mount_ctx.start() 24 return str(dataset_mount_folder) 25 26 27 if __name__ == '__main__': 28 import argparse 29 parser = argparse.ArgumentParser() 30 # Run this script as "python mount_azure_dataset.py --dataset_id TCGA-CRCk" 31 parser.add_argument('--dataset_id', type=str, 32 help='Name of the Azure dataset e.g. PANDA or TCGA-CRCk') 33 args = parser.parse_args() 34 mount_dataset(args.dataset_id) 35 [end of hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py b/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py --- a/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py +++ b/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py @@ -4,6 +4,8 @@ # ------------------------------------------------------------------------------------------ from pathlib import Path import sys +import time +from typing import Any himl_histo_root_dir = Path(__file__).parent.parent.parent himl_root = himl_histo_root_dir.parent.parent @@ -14,14 +16,14 @@ from health_azure.utils import get_workspace # noqa: E402 -def mount_dataset(dataset_id: str) -> str: +def mount_dataset(dataset_id: str) -> Any: ws = get_workspace() target_folder = "/tmp/datasets/" + dataset_id dataset = DatasetConfig(name=dataset_id, target_folder=target_folder, use_mounting=True) - dataset_mount_folder, mount_ctx = dataset.to_input_dataset_local(ws) + _, mount_ctx = dataset.to_input_dataset_local(ws) assert mount_ctx is not None # for mypy mount_ctx.start() - return str(dataset_mount_folder) + return mount_ctx if __name__ == '__main__': @@ -31,4 +33,9 @@ parser.add_argument('--dataset_id', type=str, help='Name of the Azure dataset e.g. PANDA or TCGA-CRCk') args = parser.parse_args() - mount_dataset(args.dataset_id) + # It is essential that the mount context is returned from the mounting function and referenced here. + # If not, mounting will be stopped, and the files are no longer available. + _ = mount_dataset(args.dataset_id) + print("The mounted dataset will only be available while this script is running. Press Ctrl-C to terminate it.`") + while True: + time.sleep(60)
{"golden_diff": "diff --git a/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py b/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py\n--- a/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py\n+++ b/hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py\n@@ -4,6 +4,8 @@\n # ------------------------------------------------------------------------------------------\n from pathlib import Path\n import sys\n+import time\n+from typing import Any\n \n himl_histo_root_dir = Path(__file__).parent.parent.parent\n himl_root = himl_histo_root_dir.parent.parent\n@@ -14,14 +16,14 @@\n from health_azure.utils import get_workspace # noqa: E402\n \n \n-def mount_dataset(dataset_id: str) -> str:\n+def mount_dataset(dataset_id: str) -> Any:\n ws = get_workspace()\n target_folder = \"/tmp/datasets/\" + dataset_id\n dataset = DatasetConfig(name=dataset_id, target_folder=target_folder, use_mounting=True)\n- dataset_mount_folder, mount_ctx = dataset.to_input_dataset_local(ws)\n+ _, mount_ctx = dataset.to_input_dataset_local(ws)\n assert mount_ctx is not None # for mypy\n mount_ctx.start()\n- return str(dataset_mount_folder)\n+ return mount_ctx\n \n \n if __name__ == '__main__':\n@@ -31,4 +33,9 @@\n parser.add_argument('--dataset_id', type=str,\n help='Name of the Azure dataset e.g. PANDA or TCGA-CRCk')\n args = parser.parse_args()\n- mount_dataset(args.dataset_id)\n+ # It is essential that the mount context is returned from the mounting function and referenced here.\n+ # If not, mounting will be stopped, and the files are no longer available.\n+ _ = mount_dataset(args.dataset_id)\n+ print(\"The mounted dataset will only be available while this script is running. Press Ctrl-C to terminate it.`\")\n+ while True:\n+ time.sleep(60)\n", "issue": "Histo dataset mounting script does not show files permanently\nFiles are only visible while the script is running, but disappear once terminated. \r\nThis could be a consequence of the library updates in #455.\n", "before_files": [{"content": "# ------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.\n# ------------------------------------------------------------------------------------------\nfrom pathlib import Path\nimport sys\n\nhiml_histo_root_dir = Path(__file__).parent.parent.parent\nhiml_root = himl_histo_root_dir.parent.parent\nhiml_azure_package_root = himl_root / \"hi-ml-azure\" / \"src\"\nsys.path.insert(0, str(himl_azure_package_root))\n\nfrom health_azure import DatasetConfig # noqa: E402\nfrom health_azure.utils import get_workspace # noqa: E402\n\n\ndef mount_dataset(dataset_id: str) -> str:\n ws = get_workspace()\n target_folder = \"/tmp/datasets/\" + dataset_id\n dataset = DatasetConfig(name=dataset_id, target_folder=target_folder, use_mounting=True)\n dataset_mount_folder, mount_ctx = dataset.to_input_dataset_local(ws)\n assert mount_ctx is not None # for mypy\n mount_ctx.start()\n return str(dataset_mount_folder)\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n # Run this script as \"python mount_azure_dataset.py --dataset_id TCGA-CRCk\"\n parser.add_argument('--dataset_id', type=str,\n help='Name of the Azure dataset e.g. 
PANDA or TCGA-CRCk')\n args = parser.parse_args()\n mount_dataset(args.dataset_id)\n", "path": "hi-ml-histopathology/src/histopathology/scripts/mount_azure_dataset.py"}]}
num_tokens_prompt: 997
num_tokens_diff: 463
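The golden diff above fixes the dataset-mounting regression by returning the mount context itself and keeping the process alive, instead of returning only the mount path. Below is a minimal standalone sketch of that pattern; it assumes the `health_azure` API exactly as it appears in the diff (`DatasetConfig`, `get_workspace`, `to_input_dataset_local`) plus a configured AzureML workspace, and `PANDA` is only an example dataset name:

```python
import time
from typing import Any

from health_azure import DatasetConfig            # API as used in the diff above
from health_azure.utils import get_workspace


def mount_dataset(dataset_id: str) -> Any:
    """Mount an Azure dataset locally and return the mount context object."""
    ws = get_workspace()
    dataset = DatasetConfig(name=dataset_id,
                            target_folder="/tmp/datasets/" + dataset_id,
                            use_mounting=True)
    _, mount_ctx = dataset.to_input_dataset_local(ws)
    mount_ctx.start()          # files become visible under the target folder
    return mount_ctx           # the caller must keep this reference alive


if __name__ == "__main__":
    ctx = mount_dataset("PANDA")   # holding `ctx` keeps the mount active
    print("Dataset mounted; press Ctrl-C to unmount and exit.")
    while True:
        time.sleep(60)
```

The essential design point is that the caller holds a reference to the returned context: once the context is garbage-collected or the process exits, the mounted files disappear again, which is exactly the regression described in the issue.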
problem_id: gh_patches_debug_22820
source: rasdani/github-patches
task_type: git_diff
in_source_id: bokeh__bokeh-8672
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use utf-8 encoding to open notebook file. Currently, `ipynb` file is opened by following code: ``` with open(filename) as f: pass ``` https://github.com/bokeh/bokeh/blob/master/bokeh/application/handlers/notebook.py#L117 I suggest to open the file with `encoding="utf-8"`, otherwise the serve command will raise error on Windows system. </issue> <code> [start of bokeh/application/handlers/notebook.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors. 3 # All rights reserved. 4 # 5 # The full license is in the file LICENSE.txt, distributed with this software. 6 #----------------------------------------------------------------------------- 7 ''' Provide a Bokeh Application Handler to build up documents by running 8 the code from Jupyter notebook (``.ipynb``) files. 9 10 This handler is configured with the filename of a Jupyter notebook. When a 11 Bokeh application calls ``modify_doc``, the code from all the notebook cells 12 is collected and executed to process a new Document for a session. When the 13 notebook code is executed, the Document being modified will be available as 14 ``curdoc``, and any optionally provided ``args`` will be available as 15 ``sys.argv``. 16 17 ''' 18 19 #----------------------------------------------------------------------------- 20 # Boilerplate 21 #----------------------------------------------------------------------------- 22 from __future__ import absolute_import, division, print_function, unicode_literals 23 24 import re 25 import sys 26 import logging 27 log = logging.getLogger(__name__) 28 29 #----------------------------------------------------------------------------- 30 # Imports 31 #----------------------------------------------------------------------------- 32 33 # Standard library imports 34 35 # External imports 36 37 # Bokeh imports 38 from ...util.dependencies import import_required 39 from .code import CodeHandler 40 41 #----------------------------------------------------------------------------- 42 # Globals and constants 43 #----------------------------------------------------------------------------- 44 45 __all__ = ( 46 'NotebookHandler', 47 ) 48 49 #----------------------------------------------------------------------------- 50 # General API 51 #----------------------------------------------------------------------------- 52 53 #----------------------------------------------------------------------------- 54 # Dev API 55 #----------------------------------------------------------------------------- 56 57 class NotebookHandler(CodeHandler): 58 ''' A Handler that uses code in a Jupyter notebook for modifying Bokeh 59 Documents. 60 61 ''' 62 63 _logger_text = "%s: call to %s() ignored when running notebooks with the 'bokeh' command." 
64 65 _origin = "Notebook" 66 67 def __init__(self, *args, **kwargs): 68 ''' 69 70 Keywords: 71 filename (str) : a path to a Jupyter notebook (".ipynb") file 72 73 ''' 74 nbformat = import_required('nbformat', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.') 75 nbconvert = import_required('nbconvert', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.') 76 77 if 'filename' not in kwargs: 78 raise ValueError('Must pass a filename to NotebookHandler') 79 80 81 class StripMagicsProcessor(nbconvert.preprocessors.Preprocessor): 82 """ 83 Preprocessor to convert notebooks to Python source while stripping 84 out all magics (i.e IPython specific syntax). 85 """ 86 87 _magic_pattern = re.compile(r'^\s*(?P<magic>%%\w\w+)($|(\s+))') 88 89 def strip_magics(self, source): 90 """ 91 Given the source of a cell, filter out all cell and line magics. 92 """ 93 filtered=[] 94 for line in source.splitlines(): 95 match = self._magic_pattern.match(line) 96 if match is None: 97 filtered.append(line) 98 else: 99 msg = 'Stripping out IPython magic {magic} in code cell {cell}' 100 message = msg.format(cell=self._cell_counter, magic=match.group('magic')) 101 log.warning(message) 102 return '\n'.join(filtered) 103 104 def preprocess_cell(self, cell, resources, index): 105 if cell['cell_type'] == 'code': 106 self._cell_counter += 1 107 cell['source'] = self.strip_magics(cell['source']) 108 return cell, resources 109 110 def __call__(self, nb, resources): 111 self._cell_counter = 0 112 return self.preprocess(nb,resources) 113 114 preprocessors=[StripMagicsProcessor()] 115 filename = kwargs['filename'] 116 117 with open(filename) as f: 118 nb = nbformat.read(f, nbformat.NO_CONVERT) 119 exporter = nbconvert.PythonExporter() 120 121 for preprocessor in preprocessors: 122 exporter.register_preprocessor(preprocessor) 123 124 source, _ = exporter.from_notebook_node(nb) 125 source = source.replace('get_ipython().run_line_magic', '') 126 source = source.replace('get_ipython().magic', '') 127 128 if sys.version_info.major == 2 and isinstance(source, unicode): # NOQA 129 source = source.encode('utf-8') 130 kwargs['source'] = source 131 132 super(NotebookHandler, self).__init__(*args, **kwargs) 133 134 #----------------------------------------------------------------------------- 135 # Private API 136 #----------------------------------------------------------------------------- 137 138 #----------------------------------------------------------------------------- 139 # Code 140 #----------------------------------------------------------------------------- 141 [end of bokeh/application/handlers/notebook.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bokeh/application/handlers/notebook.py b/bokeh/application/handlers/notebook.py --- a/bokeh/application/handlers/notebook.py +++ b/bokeh/application/handlers/notebook.py @@ -21,8 +21,6 @@ #----------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function, unicode_literals -import re -import sys import logging log = logging.getLogger(__name__) @@ -31,6 +29,9 @@ #----------------------------------------------------------------------------- # Standard library imports +import io +import re +import sys # External imports @@ -114,7 +115,7 @@ preprocessors=[StripMagicsProcessor()] filename = kwargs['filename'] - with open(filename) as f: + with io.open(filename, encoding="utf-8") as f: nb = nbformat.read(f, nbformat.NO_CONVERT) exporter = nbconvert.PythonExporter()
{"golden_diff": "diff --git a/bokeh/application/handlers/notebook.py b/bokeh/application/handlers/notebook.py\n--- a/bokeh/application/handlers/notebook.py\n+++ b/bokeh/application/handlers/notebook.py\n@@ -21,8 +21,6 @@\n #-----------------------------------------------------------------------------\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n-import re\n-import sys\n import logging\n log = logging.getLogger(__name__)\n \n@@ -31,6 +29,9 @@\n #-----------------------------------------------------------------------------\n \n # Standard library imports\n+import io\n+import re\n+import sys\n \n # External imports\n \n@@ -114,7 +115,7 @@\n preprocessors=[StripMagicsProcessor()]\n filename = kwargs['filename']\n \n- with open(filename) as f:\n+ with io.open(filename, encoding=\"utf-8\") as f:\n nb = nbformat.read(f, nbformat.NO_CONVERT)\n exporter = nbconvert.PythonExporter()\n", "issue": "Use utf-8 encoding to open notebook file.\nCurrently, `ipynb` file is opened by following code:\r\n\r\n```\r\n with open(filename) as f:\r\n pass\r\n```\r\n\r\nhttps://github.com/bokeh/bokeh/blob/master/bokeh/application/handlers/notebook.py#L117\r\n\r\nI suggest to open the file with `encoding=\"utf-8\"`, otherwise the serve command will raise error on Windows system.\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' Provide a Bokeh Application Handler to build up documents by running\nthe code from Jupyter notebook (``.ipynb``) files.\n\nThis handler is configured with the filename of a Jupyter notebook. When a\nBokeh application calls ``modify_doc``, the code from all the notebook cells\nis collected and executed to process a new Document for a session. 
When the\nnotebook code is executed, the Document being modified will be available as\n``curdoc``, and any optionally provided ``args`` will be available as\n``sys.argv``.\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport re\nimport sys\nimport logging\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\n\n# External imports\n\n# Bokeh imports\nfrom ...util.dependencies import import_required\nfrom .code import CodeHandler\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'NotebookHandler',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\nclass NotebookHandler(CodeHandler):\n ''' A Handler that uses code in a Jupyter notebook for modifying Bokeh\n Documents.\n\n '''\n\n _logger_text = \"%s: call to %s() ignored when running notebooks with the 'bokeh' command.\"\n\n _origin = \"Notebook\"\n\n def __init__(self, *args, **kwargs):\n '''\n\n Keywords:\n filename (str) : a path to a Jupyter notebook (\".ipynb\") file\n\n '''\n nbformat = import_required('nbformat', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')\n nbconvert = import_required('nbconvert', 'The Bokeh notebook application handler requires Jupyter Notebook to be installed.')\n\n if 'filename' not in kwargs:\n raise ValueError('Must pass a filename to NotebookHandler')\n\n\n class StripMagicsProcessor(nbconvert.preprocessors.Preprocessor):\n \"\"\"\n Preprocessor to convert notebooks to Python source while stripping\n out all magics (i.e IPython specific syntax).\n \"\"\"\n\n _magic_pattern = re.compile(r'^\\s*(?P<magic>%%\\w\\w+)($|(\\s+))')\n\n def strip_magics(self, source):\n \"\"\"\n Given the source of a cell, filter out all cell and line magics.\n \"\"\"\n filtered=[]\n for line in source.splitlines():\n match = self._magic_pattern.match(line)\n if match is None:\n filtered.append(line)\n else:\n msg = 'Stripping out IPython magic {magic} in code cell {cell}'\n message = msg.format(cell=self._cell_counter, magic=match.group('magic'))\n log.warning(message)\n return '\\n'.join(filtered)\n\n def preprocess_cell(self, cell, resources, index):\n if cell['cell_type'] == 'code':\n self._cell_counter += 1\n cell['source'] = self.strip_magics(cell['source'])\n return cell, resources\n\n def __call__(self, nb, resources):\n self._cell_counter = 0\n return self.preprocess(nb,resources)\n\n preprocessors=[StripMagicsProcessor()]\n filename = kwargs['filename']\n\n with open(filename) as f:\n nb = nbformat.read(f, nbformat.NO_CONVERT)\n exporter = nbconvert.PythonExporter()\n\n for preprocessor in preprocessors:\n exporter.register_preprocessor(preprocessor)\n\n source, _ = exporter.from_notebook_node(nb)\n source = 
source.replace('get_ipython().run_line_magic', '')\n source = source.replace('get_ipython().magic', '')\n\n if sys.version_info.major == 2 and isinstance(source, unicode): # NOQA\n source = source.encode('utf-8')\n kwargs['source'] = source\n\n super(NotebookHandler, self).__init__(*args, **kwargs)\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "path": "bokeh/application/handlers/notebook.py"}]}
num_tokens_prompt: 1895
num_tokens_diff: 219
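The one-line fix above replaces `open(filename)` with `io.open(filename, encoding="utf-8")` so the notebook is read the same way on every platform. A small sketch of the same idea in isolation — `notebook_to_source` is a hypothetical helper, not part of Bokeh, and it assumes `nbformat` and `nbconvert` are installed:

```python
import io

import nbformat
import nbconvert


def notebook_to_source(filename: str) -> str:
    """Read a Jupyter notebook with an explicit encoding and convert it to Python source."""
    # io.open(..., encoding="utf-8") works identically on Python 2 and 3, so the
    # result no longer depends on the platform default codec (e.g. cp1252 on Windows).
    with io.open(filename, encoding="utf-8") as f:
        nb = nbformat.read(f, nbformat.NO_CONVERT)
    source, _ = nbconvert.PythonExporter().from_notebook_node(nb)
    return source
```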
problem_id: gh_patches_debug_31100
source: rasdani/github-patches
task_type: git_diff
in_source_id: gratipay__gratipay.com-2498
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> fix regression in receiving amounts Stripe is giving to Bundler now, but Bundler's "receiving" doesn't reflect this, due to a regression with #2352. [IRC](https://botbot.me/freenode/gittip/2014-06-12/?msg=16141806&page=1) cc: @indirect @gdb </issue> <code> [start of gittip/billing/__init__.py] 1 """This module encapsulates billing logic and db access. 2 3 There are three pieces of information for each participant related to billing: 4 5 balanced_customer_href 6 * NULL - This participant has never been billed. 7 * 'deadbeef' - This participant has had a Balanced account created for 8 them, either by adding a credit card or a bank account. 9 last_bill_result 10 * NULL - This participant has not had their credit card charged yet. 11 * '' - This participant has a working card. 12 * <message> - An error message. 13 last_ach_result 14 * NULL - This participant has not wired up a bank account yet. 15 * '' - This participant has a working bank account. 16 * <message> - An error message. 17 18 """ 19 from __future__ import unicode_literals 20 21 import balanced 22 from aspen.utils import typecheck 23 24 25 def get_balanced_account(db, username, balanced_customer_href): 26 """Find or create a balanced.Account. 27 """ 28 typecheck( username, unicode 29 , balanced_customer_href, (unicode, None) 30 ) 31 32 if balanced_customer_href is None: 33 customer = balanced.Customer(meta={ 34 'username': username, 35 }).save() 36 BALANCED_ACCOUNT = """\ 37 38 UPDATE participants 39 SET balanced_customer_href=%s 40 WHERE username=%s 41 42 """ 43 db.run(BALANCED_ACCOUNT, (customer.href, username)) 44 else: 45 customer = balanced.Customer.fetch(balanced_customer_href) 46 return customer 47 48 49 def associate(db, thing, username, balanced_customer_href, balanced_thing_uri): 50 """Given four unicodes, return a unicode. 51 52 This function attempts to associate the credit card or bank account details 53 referenced by balanced_thing_uri with a Balanced Account. If it fails we 54 log and return a unicode describing the failure. Even for failure we keep 55 balanced_customer_href; we don't reset it to None/NULL. It's useful for 56 loading the previous (bad) info from Balanced in order to prepopulate the 57 form. 58 59 """ 60 typecheck( username, unicode 61 , balanced_customer_href, (unicode, None, balanced.Customer) 62 , balanced_thing_uri, unicode 63 , thing, unicode 64 ) 65 66 if isinstance(balanced_customer_href, balanced.Customer): 67 balanced_account = balanced_customer_href 68 else: 69 balanced_account = get_balanced_account( db 70 , username 71 , balanced_customer_href 72 ) 73 invalidate_on_balanced(thing, balanced_account.href) 74 SQL = "UPDATE participants SET last_%s_result=%%s WHERE username=%%s" 75 try: 76 if thing == "credit card": 77 SQL %= "bill" 78 obj = balanced.Card.fetch(balanced_thing_uri) 79 #add = balanced_account.add_card 80 81 else: 82 assert thing == "bank account", thing # sanity check 83 SQL %= "ach" 84 obj = balanced.BankAccount.fetch(balanced_thing_uri) 85 #add = balanced_account.add_bank_account 86 87 obj.associate_to_customer(balanced_account) 88 except balanced.exc.HTTPError as err: 89 error = err.message.message.decode('UTF-8') # XXX UTF-8? 90 else: 91 error = '' 92 typecheck(error, unicode) 93 94 db.run(SQL, (error, username)) 95 return error 96 97 98 def invalidate_on_balanced(thing, balanced_customer_href): 99 """XXX Things in balanced cannot be deleted at the moment. 
100 101 Instead we mark all valid cards as invalid which will restrict against 102 anyone being able to issue charges against them in the future. 103 104 See: https://github.com/balanced/balanced-api/issues/22 105 106 """ 107 assert thing in ("credit card", "bank account") 108 typecheck(balanced_customer_href, (str, unicode)) 109 110 customer = balanced.Customer.fetch(balanced_customer_href) 111 things = customer.cards if thing == "credit card" else customer.bank_accounts 112 113 for _thing in things: 114 _thing.unstore() 115 116 117 def clear(db, thing, username, balanced_customer_href): 118 typecheck( thing, unicode 119 , username, unicode 120 , balanced_customer_href, (unicode, str) 121 ) 122 assert thing in ("credit card", "bank account"), thing 123 invalidate_on_balanced(thing, balanced_customer_href) 124 CLEAR = """\ 125 126 UPDATE participants 127 SET last_%s_result=NULL 128 WHERE username=%%s 129 130 """ % ("bill" if thing == "credit card" else "ach") 131 db.run(CLEAR, (username,)) 132 133 134 def store_error(db, thing, username, msg): 135 typecheck(thing, unicode, username, unicode, msg, unicode) 136 assert thing in ("credit card", "bank account"), thing 137 ERROR = """\ 138 139 UPDATE participants 140 SET last_%s_result=%%s 141 WHERE username=%%s 142 143 """ % ("bill" if thing == "credit card" else "ach") 144 db.run(ERROR, (msg, username)) 145 146 147 class BalancedThing(object): 148 """Represent either a credit card or a bank account. 149 """ 150 151 thing_type = None # either 'card' or 'bank_account' 152 keys_to_attr_paths = None # set to a mapping in subclasses 153 154 _customer = None # underlying balanced.Customer object 155 _thing = None # underlying balanced.{BankAccount,Card} object 156 157 def __getitem__(self, key): 158 """Given a name, return a unicode. 159 160 Allow subclasses to provide a flat set of keys, which, under the hood, 161 might be nested attributes and/or keys. The traversal path is relative 162 to _thing (not self!). 163 164 """ 165 attr_path = self.keys_to_attr_paths.get(key, key) 166 167 out = None 168 if self._customer is not None and self._thing is not None: 169 out = self._thing 170 for val in attr_path.split('.'): 171 if type(out) is dict: 172 # this lets us reach into the meta dict 173 out = out.get(val) 174 else: 175 try: 176 out = getattr(out, val) 177 except AttributeError: 178 raise KeyError("{} not found".format(val)) 179 if out is None: 180 break 181 182 if out is None: 183 # Default to ''; see https://github.com/gittip/www.gittip.com/issues/2161. 184 out = '' 185 186 return out 187 188 def __init__(self, balanced_customer_href): 189 """Given a Balanced account_uri, load data from Balanced. 190 """ 191 if balanced_customer_href is None: 192 return 193 194 # XXX Indexing is borken. See: 195 # https://github.com/balanced/balanced-python/issues/10 196 197 self._customer = balanced.Customer.fetch(balanced_customer_href) 198 199 things = getattr(self._customer, self.thing_type+'s')\ 200 .filter(is_valid=True).all() 201 nvalid = len(things) 202 203 if nvalid == 0: 204 self._thing = None 205 elif nvalid == 1: 206 self._thing = things[0] 207 else: 208 msg = "%s has %d valid %ss" 209 msg %= (balanced_customer_href, len(things), self.thing_type) 210 raise RuntimeError(msg) 211 212 @property 213 def is_setup(self): 214 return self._thing is not None 215 216 217 class BalancedCard(BalancedThing): 218 """This is a dict-like wrapper around a Balanced credit card. 
219 """ 220 221 thing_type = 'card' 222 223 keys_to_attr_paths = { 224 'id': 'customer.href', 225 'address_1': 'address.line1', 226 'address_2': 'meta.address_2', 227 'country': 'meta.country', 228 'city_town': 'meta.city_town', 229 'zip': 'address.postal_code', 230 # gittip is saving the state in the meta field 231 # for compatibility with legacy customers 232 'state': 'meta.region', 233 'last4': 'number', 234 'last_four': 'number', 235 'expiration_month': 'expiration_month', 236 'expiration_year': 'expiration_year', 237 } 238 239 240 class BalancedBankAccount(BalancedThing): 241 """This is a dict-like wrapper around a Balanced bank account. 242 """ 243 244 thing_type = 'bank_account' 245 246 keys_to_attr_paths = { 247 'customer_href': 'customer.href', 248 } 249 [end of gittip/billing/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gittip/billing/__init__.py b/gittip/billing/__init__.py --- a/gittip/billing/__init__.py +++ b/gittip/billing/__init__.py @@ -22,6 +22,29 @@ from aspen.utils import typecheck +def update_receiving_amounts(db, tipper, error): + """Update receiving amounts of participants tipped by given tipper. + """ + from gittip.models.participant import Participant + tipper = Participant.from_username(tipper) + if tipper.is_suspicious or tipper.last_bill_result == error: + return + if error == '': + op = '+' + else: + op = '-' + db.run(""" + UPDATE participants + SET receiving = (receiving {0} amount) + FROM ( SELECT DISTINCT ON (tippee) tippee, amount + FROM tips + WHERE tipper=%(tipper)s + ORDER BY tippee, mtime DESC + ) foo + WHERE tippee = username; + """.format(op), dict(tipper=tipper)) + + def get_balanced_account(db, username, balanced_customer_href): """Find or create a balanced.Account. """ @@ -92,6 +115,8 @@ typecheck(error, unicode) db.run(SQL, (error, username)) + if thing == "credit card": + update_receiving_amounts(db, username, error) return error @@ -129,6 +154,8 @@ """ % ("bill" if thing == "credit card" else "ach") db.run(CLEAR, (username,)) + if thing == "credit card": + update_receiving_amounts(db, username, None) def store_error(db, thing, username, msg): @@ -142,6 +169,8 @@ """ % ("bill" if thing == "credit card" else "ach") db.run(ERROR, (msg, username)) + if thing == "credit card": + update_receiving_amounts(db, username, msg) class BalancedThing(object):
{"golden_diff": "diff --git a/gittip/billing/__init__.py b/gittip/billing/__init__.py\n--- a/gittip/billing/__init__.py\n+++ b/gittip/billing/__init__.py\n@@ -22,6 +22,29 @@\n from aspen.utils import typecheck\n \n \n+def update_receiving_amounts(db, tipper, error):\n+ \"\"\"Update receiving amounts of participants tipped by given tipper.\n+ \"\"\"\n+ from gittip.models.participant import Participant\n+ tipper = Participant.from_username(tipper)\n+ if tipper.is_suspicious or tipper.last_bill_result == error:\n+ return\n+ if error == '':\n+ op = '+'\n+ else:\n+ op = '-'\n+ db.run(\"\"\"\n+ UPDATE participants\n+ SET receiving = (receiving {0} amount)\n+ FROM ( SELECT DISTINCT ON (tippee) tippee, amount\n+ FROM tips\n+ WHERE tipper=%(tipper)s\n+ ORDER BY tippee, mtime DESC\n+ ) foo\n+ WHERE tippee = username;\n+ \"\"\".format(op), dict(tipper=tipper))\n+\n+\n def get_balanced_account(db, username, balanced_customer_href):\n \"\"\"Find or create a balanced.Account.\n \"\"\"\n@@ -92,6 +115,8 @@\n typecheck(error, unicode)\n \n db.run(SQL, (error, username))\n+ if thing == \"credit card\":\n+ update_receiving_amounts(db, username, error)\n return error\n \n \n@@ -129,6 +154,8 @@\n \n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(CLEAR, (username,))\n+ if thing == \"credit card\":\n+ update_receiving_amounts(db, username, None)\n \n \n def store_error(db, thing, username, msg):\n@@ -142,6 +169,8 @@\n \n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(ERROR, (msg, username))\n+ if thing == \"credit card\":\n+ update_receiving_amounts(db, username, msg)\n \n \n class BalancedThing(object):\n", "issue": "fix regression in receiving amounts\nStripe is giving to Bundler now, but Bundler's \"receiving\" doesn't reflect this, due to a regression with #2352. 
[IRC](https://botbot.me/freenode/gittip/2014-06-12/?msg=16141806&page=1)\n\ncc: @indirect @gdb\n\n", "before_files": [{"content": "\"\"\"This module encapsulates billing logic and db access.\n\nThere are three pieces of information for each participant related to billing:\n\n balanced_customer_href\n * NULL - This participant has never been billed.\n * 'deadbeef' - This participant has had a Balanced account created for\n them, either by adding a credit card or a bank account.\n last_bill_result\n * NULL - This participant has not had their credit card charged yet.\n * '' - This participant has a working card.\n * <message> - An error message.\n last_ach_result\n * NULL - This participant has not wired up a bank account yet.\n * '' - This participant has a working bank account.\n * <message> - An error message.\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport balanced\nfrom aspen.utils import typecheck\n\n\ndef get_balanced_account(db, username, balanced_customer_href):\n \"\"\"Find or create a balanced.Account.\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None)\n )\n\n if balanced_customer_href is None:\n customer = balanced.Customer(meta={\n 'username': username,\n }).save()\n BALANCED_ACCOUNT = \"\"\"\\\n\n UPDATE participants\n SET balanced_customer_href=%s\n WHERE username=%s\n\n \"\"\"\n db.run(BALANCED_ACCOUNT, (customer.href, username))\n else:\n customer = balanced.Customer.fetch(balanced_customer_href)\n return customer\n\n\ndef associate(db, thing, username, balanced_customer_href, balanced_thing_uri):\n \"\"\"Given four unicodes, return a unicode.\n\n This function attempts to associate the credit card or bank account details\n referenced by balanced_thing_uri with a Balanced Account. If it fails we\n log and return a unicode describing the failure. Even for failure we keep\n balanced_customer_href; we don't reset it to None/NULL. 
It's useful for\n loading the previous (bad) info from Balanced in order to prepopulate the\n form.\n\n \"\"\"\n typecheck( username, unicode\n , balanced_customer_href, (unicode, None, balanced.Customer)\n , balanced_thing_uri, unicode\n , thing, unicode\n )\n\n if isinstance(balanced_customer_href, balanced.Customer):\n balanced_account = balanced_customer_href\n else:\n balanced_account = get_balanced_account( db\n , username\n , balanced_customer_href\n )\n invalidate_on_balanced(thing, balanced_account.href)\n SQL = \"UPDATE participants SET last_%s_result=%%s WHERE username=%%s\"\n try:\n if thing == \"credit card\":\n SQL %= \"bill\"\n obj = balanced.Card.fetch(balanced_thing_uri)\n #add = balanced_account.add_card\n\n else:\n assert thing == \"bank account\", thing # sanity check\n SQL %= \"ach\"\n obj = balanced.BankAccount.fetch(balanced_thing_uri)\n #add = balanced_account.add_bank_account\n\n obj.associate_to_customer(balanced_account)\n except balanced.exc.HTTPError as err:\n error = err.message.message.decode('UTF-8') # XXX UTF-8?\n else:\n error = ''\n typecheck(error, unicode)\n\n db.run(SQL, (error, username))\n return error\n\n\ndef invalidate_on_balanced(thing, balanced_customer_href):\n \"\"\"XXX Things in balanced cannot be deleted at the moment.\n\n Instead we mark all valid cards as invalid which will restrict against\n anyone being able to issue charges against them in the future.\n\n See: https://github.com/balanced/balanced-api/issues/22\n\n \"\"\"\n assert thing in (\"credit card\", \"bank account\")\n typecheck(balanced_customer_href, (str, unicode))\n\n customer = balanced.Customer.fetch(balanced_customer_href)\n things = customer.cards if thing == \"credit card\" else customer.bank_accounts\n\n for _thing in things:\n _thing.unstore()\n\n\ndef clear(db, thing, username, balanced_customer_href):\n typecheck( thing, unicode\n , username, unicode\n , balanced_customer_href, (unicode, str)\n )\n assert thing in (\"credit card\", \"bank account\"), thing\n invalidate_on_balanced(thing, balanced_customer_href)\n CLEAR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=NULL\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(CLEAR, (username,))\n\n\ndef store_error(db, thing, username, msg):\n typecheck(thing, unicode, username, unicode, msg, unicode)\n assert thing in (\"credit card\", \"bank account\"), thing\n ERROR = \"\"\"\\\n\n UPDATE participants\n SET last_%s_result=%%s\n WHERE username=%%s\n\n \"\"\" % (\"bill\" if thing == \"credit card\" else \"ach\")\n db.run(ERROR, (msg, username))\n\n\nclass BalancedThing(object):\n \"\"\"Represent either a credit card or a bank account.\n \"\"\"\n\n thing_type = None # either 'card' or 'bank_account'\n keys_to_attr_paths = None # set to a mapping in subclasses\n\n _customer = None # underlying balanced.Customer object\n _thing = None # underlying balanced.{BankAccount,Card} object\n\n def __getitem__(self, key):\n \"\"\"Given a name, return a unicode.\n\n Allow subclasses to provide a flat set of keys, which, under the hood,\n might be nested attributes and/or keys. 
The traversal path is relative\n to _thing (not self!).\n\n \"\"\"\n attr_path = self.keys_to_attr_paths.get(key, key)\n\n out = None\n if self._customer is not None and self._thing is not None:\n out = self._thing\n for val in attr_path.split('.'):\n if type(out) is dict:\n # this lets us reach into the meta dict\n out = out.get(val)\n else:\n try:\n out = getattr(out, val)\n except AttributeError:\n raise KeyError(\"{} not found\".format(val))\n if out is None:\n break\n\n if out is None:\n # Default to ''; see https://github.com/gittip/www.gittip.com/issues/2161.\n out = ''\n\n return out\n\n def __init__(self, balanced_customer_href):\n \"\"\"Given a Balanced account_uri, load data from Balanced.\n \"\"\"\n if balanced_customer_href is None:\n return\n\n # XXX Indexing is borken. See:\n # https://github.com/balanced/balanced-python/issues/10\n\n self._customer = balanced.Customer.fetch(balanced_customer_href)\n\n things = getattr(self._customer, self.thing_type+'s')\\\n .filter(is_valid=True).all()\n nvalid = len(things)\n\n if nvalid == 0:\n self._thing = None\n elif nvalid == 1:\n self._thing = things[0]\n else:\n msg = \"%s has %d valid %ss\"\n msg %= (balanced_customer_href, len(things), self.thing_type)\n raise RuntimeError(msg)\n\n @property\n def is_setup(self):\n return self._thing is not None\n\n\nclass BalancedCard(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced credit card.\n \"\"\"\n\n thing_type = 'card'\n\n keys_to_attr_paths = {\n 'id': 'customer.href',\n 'address_1': 'address.line1',\n 'address_2': 'meta.address_2',\n 'country': 'meta.country',\n 'city_town': 'meta.city_town',\n 'zip': 'address.postal_code',\n # gittip is saving the state in the meta field\n # for compatibility with legacy customers\n 'state': 'meta.region',\n 'last4': 'number',\n 'last_four': 'number',\n 'expiration_month': 'expiration_month',\n 'expiration_year': 'expiration_year',\n }\n\n\nclass BalancedBankAccount(BalancedThing):\n \"\"\"This is a dict-like wrapper around a Balanced bank account.\n \"\"\"\n\n thing_type = 'bank_account'\n\n keys_to_attr_paths = {\n 'customer_href': 'customer.href',\n }\n", "path": "gittip/billing/__init__.py"}]}
num_tokens_prompt: 3077
num_tokens_diff: 491
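The patch above keeps participants' `receiving` totals in sync when a tipper's credit card starts or stops working. A simplified sketch of that helper is shown below; it assumes the same Postgres schema (`participants`, `tips`) and the `db.run` wrapper used elsewhere in the module, treats `tipper` as a plain username string, and omits the extra checks in the real fix (skipping suspicious tippers and unchanged bill results):

```python
def update_receiving_amounts(db, tipper, error):
    """Adjust the `receiving` column of everyone this tipper tips.

    An empty error string means the card now works, so the tipper's latest tip
    to each tippee is added back; a non-empty error means it is subtracted.
    """
    op = '+' if error == '' else '-'
    db.run("""
        UPDATE participants
           SET receiving = (receiving {0} amount)
          FROM ( SELECT DISTINCT ON (tippee) tippee, amount
                   FROM tips
                  WHERE tipper = %(tipper)s
               ORDER BY tippee, mtime DESC
               ) latest_tips
         WHERE tippee = username
    """.format(op), dict(tipper=tipper))
```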
problem_id: gh_patches_debug_6081
source: rasdani/github-patches
task_type: git_diff
in_source_id: CTFd__CTFd-1963
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add CSV examples for CSV Import Add CSV examples for CSV Import </issue> <code> [start of CTFd/utils/csv/__init__.py] 1 import csv 2 import json 3 from io import BytesIO, StringIO 4 5 from CTFd.models import ( 6 Flags, 7 Hints, 8 Tags, 9 TeamFields, 10 Teams, 11 UserFields, 12 Users, 13 db, 14 get_class_by_tablename, 15 ) 16 from CTFd.plugins.challenges import get_chal_class 17 from CTFd.utils.config import is_teams_mode, is_users_mode 18 from CTFd.utils.scores import get_standings 19 20 21 def get_dumpable_tables(): 22 csv_keys = list(CSV_KEYS.keys()) 23 db_keys = list(db.metadata.tables.keys()) 24 tables = csv_keys + db_keys 25 table_keys = list(zip(tables, tables)) 26 return table_keys 27 28 29 def dump_csv(name): 30 dump_func = CSV_KEYS.get(name) 31 if dump_func: 32 return dump_func() 33 elif get_class_by_tablename(name): 34 return dump_database_table(tablename=name) 35 else: 36 raise KeyError 37 38 39 def dump_scoreboard_csv(): 40 # TODO: Add fields to scoreboard data 41 temp = StringIO() 42 writer = csv.writer(temp) 43 44 standings = get_standings() 45 46 # Get all user fields in a specific order 47 user_fields = UserFields.query.all() 48 user_field_ids = [f.id for f in user_fields] 49 user_field_names = [f.name for f in user_fields] 50 51 if is_teams_mode(): 52 team_fields = TeamFields.query.all() 53 team_field_ids = [f.id for f in team_fields] 54 team_field_names = [f.name for f in team_fields] 55 56 header = ( 57 [ 58 "place", 59 "team", 60 "team id", 61 "score", 62 "member name", 63 "member id", 64 "member email", 65 "member score", 66 ] 67 + user_field_names 68 + team_field_names 69 ) 70 writer.writerow(header) 71 72 for i, standing in enumerate(standings): 73 team = Teams.query.filter_by(id=standing.account_id).first() 74 75 # Build field entries using the order of the field values 76 team_field_entries = {f.field_id: f.value for f in team.field_entries} 77 team_field_values = [ 78 team_field_entries.get(f_id, "") for f_id in team_field_ids 79 ] 80 team_row = [ 81 i + 1, 82 team.name, 83 team.id, 84 standing.score, 85 "", 86 "", 87 ] + team_field_values 88 89 writer.writerow(team_row) 90 91 for member in team.members: 92 user_field_entries = {f.field_id: f.value for f in member.field_entries} 93 user_field_values = [ 94 user_field_entries.get(f_id, "") for f_id in user_field_ids 95 ] 96 user_row = [ 97 "", 98 "", 99 "", 100 "", 101 member.name, 102 member.id, 103 member.email, 104 member.score, 105 ] + user_field_values 106 writer.writerow(user_row) 107 elif is_users_mode(): 108 header = ["place", "user", "score"] + user_field_names 109 writer.writerow(header) 110 111 for i, standing in enumerate(standings): 112 user = Users.query.filter_by(id=standing.account_id).first() 113 114 # Build field entries using the order of the field values 115 user_field_entries = {f.field_id: f.value for f in user.field_entries} 116 user_field_values = [ 117 user_field_entries.get(f_id, "") for f_id in user_field_ids 118 ] 119 user_row = [i + 1, user.name, standing.score] + user_field_values 120 writer.writerow(user_row) 121 122 # In Python 3 send_file requires bytes 123 output = BytesIO() 124 output.write(temp.getvalue().encode("utf-8")) 125 output.seek(0) 126 temp.close() 127 128 return output 129 130 131 def dump_users_with_fields_csv(): 132 temp = StringIO() 133 writer = csv.writer(temp) 134 135 user_fields = UserFields.query.all() 136 user_field_ids = [f.id for f in user_fields] 137 
user_field_names = [f.name for f in user_fields] 138 139 header = [column.name for column in Users.__mapper__.columns] + user_field_names 140 writer.writerow(header) 141 142 responses = Users.query.all() 143 144 for curr in responses: 145 user_field_entries = {f.field_id: f.value for f in curr.field_entries} 146 user_field_values = [ 147 user_field_entries.get(f_id, "") for f_id in user_field_ids 148 ] 149 user_row = [ 150 getattr(curr, column.name) for column in Users.__mapper__.columns 151 ] + user_field_values 152 writer.writerow(user_row) 153 154 temp.seek(0) 155 156 # In Python 3 send_file requires bytes 157 output = BytesIO() 158 output.write(temp.getvalue().encode("utf-8")) 159 output.seek(0) 160 temp.close() 161 162 return output 163 164 165 def dump_teams_with_fields_csv(): 166 temp = StringIO() 167 writer = csv.writer(temp) 168 169 team_fields = TeamFields.query.all() 170 team_field_ids = [f.id for f in team_fields] 171 team_field_names = [f.name for f in team_fields] 172 173 header = [column.name for column in Teams.__mapper__.columns] + team_field_names 174 writer.writerow(header) 175 176 responses = Teams.query.all() 177 178 for curr in responses: 179 team_field_entries = {f.field_id: f.value for f in curr.field_entries} 180 team_field_values = [ 181 team_field_entries.get(f_id, "") for f_id in team_field_ids 182 ] 183 184 team_row = [ 185 getattr(curr, column.name) for column in Teams.__mapper__.columns 186 ] + team_field_values 187 188 writer.writerow(team_row) 189 190 temp.seek(0) 191 192 # In Python 3 send_file requires bytes 193 output = BytesIO() 194 output.write(temp.getvalue().encode("utf-8")) 195 output.seek(0) 196 temp.close() 197 198 return output 199 200 201 def dump_database_table(tablename): 202 # TODO: It might make sense to limit dumpable tables. Config could potentially leak sensitive information. 
203 model = get_class_by_tablename(tablename) 204 205 if model is None: 206 raise KeyError("Unknown database table") 207 208 temp = StringIO() 209 writer = csv.writer(temp) 210 211 header = [column.name for column in model.__mapper__.columns] 212 writer.writerow(header) 213 214 responses = model.query.all() 215 216 for curr in responses: 217 writer.writerow( 218 [getattr(curr, column.name) for column in model.__mapper__.columns] 219 ) 220 221 temp.seek(0) 222 223 # In Python 3 send_file requires bytes 224 output = BytesIO() 225 output.write(temp.getvalue().encode("utf-8")) 226 output.seek(0) 227 temp.close() 228 229 return output 230 231 232 def load_users_csv(dict_reader): 233 for line in dict_reader: 234 result = Users(**line) 235 db.session.add(result) 236 db.session.commit() 237 return True 238 239 240 def load_teams_csv(dict_reader): 241 for line in dict_reader: 242 result = Teams(**line) 243 db.session.add(result) 244 db.session.commit() 245 return True 246 247 248 def load_challenges_csv(dict_reader): 249 for line in dict_reader: 250 flags = line.pop("flags", None) 251 tags = line.pop("tags", None) 252 hints = line.pop("hints", None) 253 challenge_type = line.pop("type", "standard") 254 255 # Load in custome type_data 256 type_data = json.loads(line.pop("type_data", "{}")) 257 line.update(type_data) 258 259 ChallengeClass = get_chal_class(challenge_type) 260 challenge = ChallengeClass.challenge_model(**line) 261 db.session.add(challenge) 262 db.session.commit() 263 264 if flags: 265 flags = [flag.strip() for flag in flags.split(",")] 266 for flag in flags: 267 f = Flags(type="static", challenge_id=challenge.id, content=flag,) 268 db.session.add(f) 269 db.session.commit() 270 271 if tags: 272 tags = [tag.strip() for tag in tags.split(",")] 273 for tag in tags: 274 t = Tags(challenge_id=challenge.id, value=tag,) 275 db.session.add(t) 276 db.session.commit() 277 278 if hints: 279 hints = [hint.strip() for hint in hints.split(",")] 280 for hint in hints: 281 h = Hints(challenge_id=challenge.id, content=hint,) 282 db.session.add(h) 283 db.session.commit() 284 return True 285 286 287 CSV_KEYS = { 288 "scoreboard": dump_scoreboard_csv, 289 "users+fields": dump_users_with_fields_csv, 290 "teams+fields": dump_teams_with_fields_csv, 291 } 292 [end of CTFd/utils/csv/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/CTFd/utils/csv/__init__.py b/CTFd/utils/csv/__init__.py --- a/CTFd/utils/csv/__init__.py +++ b/CTFd/utils/csv/__init__.py @@ -252,8 +252,8 @@ hints = line.pop("hints", None) challenge_type = line.pop("type", "standard") - # Load in custome type_data - type_data = json.loads(line.pop("type_data", "{}")) + # Load in custom type_data + type_data = json.loads(line.pop("type_data", "{}") or "{}") line.update(type_data) ChallengeClass = get_chal_class(challenge_type)
{"golden_diff": "diff --git a/CTFd/utils/csv/__init__.py b/CTFd/utils/csv/__init__.py\n--- a/CTFd/utils/csv/__init__.py\n+++ b/CTFd/utils/csv/__init__.py\n@@ -252,8 +252,8 @@\n hints = line.pop(\"hints\", None)\n challenge_type = line.pop(\"type\", \"standard\")\n \n- # Load in custome type_data\n- type_data = json.loads(line.pop(\"type_data\", \"{}\"))\n+ # Load in custom type_data\n+ type_data = json.loads(line.pop(\"type_data\", \"{}\") or \"{}\")\n line.update(type_data)\n \n ChallengeClass = get_chal_class(challenge_type)\n", "issue": "Add CSV examples for CSV Import\nAdd CSV examples for CSV Import\n", "before_files": [{"content": "import csv\nimport json\nfrom io import BytesIO, StringIO\n\nfrom CTFd.models import (\n Flags,\n Hints,\n Tags,\n TeamFields,\n Teams,\n UserFields,\n Users,\n db,\n get_class_by_tablename,\n)\nfrom CTFd.plugins.challenges import get_chal_class\nfrom CTFd.utils.config import is_teams_mode, is_users_mode\nfrom CTFd.utils.scores import get_standings\n\n\ndef get_dumpable_tables():\n csv_keys = list(CSV_KEYS.keys())\n db_keys = list(db.metadata.tables.keys())\n tables = csv_keys + db_keys\n table_keys = list(zip(tables, tables))\n return table_keys\n\n\ndef dump_csv(name):\n dump_func = CSV_KEYS.get(name)\n if dump_func:\n return dump_func()\n elif get_class_by_tablename(name):\n return dump_database_table(tablename=name)\n else:\n raise KeyError\n\n\ndef dump_scoreboard_csv():\n # TODO: Add fields to scoreboard data\n temp = StringIO()\n writer = csv.writer(temp)\n\n standings = get_standings()\n\n # Get all user fields in a specific order\n user_fields = UserFields.query.all()\n user_field_ids = [f.id for f in user_fields]\n user_field_names = [f.name for f in user_fields]\n\n if is_teams_mode():\n team_fields = TeamFields.query.all()\n team_field_ids = [f.id for f in team_fields]\n team_field_names = [f.name for f in team_fields]\n\n header = (\n [\n \"place\",\n \"team\",\n \"team id\",\n \"score\",\n \"member name\",\n \"member id\",\n \"member email\",\n \"member score\",\n ]\n + user_field_names\n + team_field_names\n )\n writer.writerow(header)\n\n for i, standing in enumerate(standings):\n team = Teams.query.filter_by(id=standing.account_id).first()\n\n # Build field entries using the order of the field values\n team_field_entries = {f.field_id: f.value for f in team.field_entries}\n team_field_values = [\n team_field_entries.get(f_id, \"\") for f_id in team_field_ids\n ]\n team_row = [\n i + 1,\n team.name,\n team.id,\n standing.score,\n \"\",\n \"\",\n ] + team_field_values\n\n writer.writerow(team_row)\n\n for member in team.members:\n user_field_entries = {f.field_id: f.value for f in member.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [\n \"\",\n \"\",\n \"\",\n \"\",\n member.name,\n member.id,\n member.email,\n member.score,\n ] + user_field_values\n writer.writerow(user_row)\n elif is_users_mode():\n header = [\"place\", \"user\", \"score\"] + user_field_names\n writer.writerow(header)\n\n for i, standing in enumerate(standings):\n user = Users.query.filter_by(id=standing.account_id).first()\n\n # Build field entries using the order of the field values\n user_field_entries = {f.field_id: f.value for f in user.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [i + 1, user.name, standing.score] + user_field_values\n writer.writerow(user_row)\n\n # In Python 3 send_file requires bytes\n output = 
BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_users_with_fields_csv():\n temp = StringIO()\n writer = csv.writer(temp)\n\n user_fields = UserFields.query.all()\n user_field_ids = [f.id for f in user_fields]\n user_field_names = [f.name for f in user_fields]\n\n header = [column.name for column in Users.__mapper__.columns] + user_field_names\n writer.writerow(header)\n\n responses = Users.query.all()\n\n for curr in responses:\n user_field_entries = {f.field_id: f.value for f in curr.field_entries}\n user_field_values = [\n user_field_entries.get(f_id, \"\") for f_id in user_field_ids\n ]\n user_row = [\n getattr(curr, column.name) for column in Users.__mapper__.columns\n ] + user_field_values\n writer.writerow(user_row)\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_teams_with_fields_csv():\n temp = StringIO()\n writer = csv.writer(temp)\n\n team_fields = TeamFields.query.all()\n team_field_ids = [f.id for f in team_fields]\n team_field_names = [f.name for f in team_fields]\n\n header = [column.name for column in Teams.__mapper__.columns] + team_field_names\n writer.writerow(header)\n\n responses = Teams.query.all()\n\n for curr in responses:\n team_field_entries = {f.field_id: f.value for f in curr.field_entries}\n team_field_values = [\n team_field_entries.get(f_id, \"\") for f_id in team_field_ids\n ]\n\n team_row = [\n getattr(curr, column.name) for column in Teams.__mapper__.columns\n ] + team_field_values\n\n writer.writerow(team_row)\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef dump_database_table(tablename):\n # TODO: It might make sense to limit dumpable tables. 
Config could potentially leak sensitive information.\n model = get_class_by_tablename(tablename)\n\n if model is None:\n raise KeyError(\"Unknown database table\")\n\n temp = StringIO()\n writer = csv.writer(temp)\n\n header = [column.name for column in model.__mapper__.columns]\n writer.writerow(header)\n\n responses = model.query.all()\n\n for curr in responses:\n writer.writerow(\n [getattr(curr, column.name) for column in model.__mapper__.columns]\n )\n\n temp.seek(0)\n\n # In Python 3 send_file requires bytes\n output = BytesIO()\n output.write(temp.getvalue().encode(\"utf-8\"))\n output.seek(0)\n temp.close()\n\n return output\n\n\ndef load_users_csv(dict_reader):\n for line in dict_reader:\n result = Users(**line)\n db.session.add(result)\n db.session.commit()\n return True\n\n\ndef load_teams_csv(dict_reader):\n for line in dict_reader:\n result = Teams(**line)\n db.session.add(result)\n db.session.commit()\n return True\n\n\ndef load_challenges_csv(dict_reader):\n for line in dict_reader:\n flags = line.pop(\"flags\", None)\n tags = line.pop(\"tags\", None)\n hints = line.pop(\"hints\", None)\n challenge_type = line.pop(\"type\", \"standard\")\n\n # Load in custome type_data\n type_data = json.loads(line.pop(\"type_data\", \"{}\"))\n line.update(type_data)\n\n ChallengeClass = get_chal_class(challenge_type)\n challenge = ChallengeClass.challenge_model(**line)\n db.session.add(challenge)\n db.session.commit()\n\n if flags:\n flags = [flag.strip() for flag in flags.split(\",\")]\n for flag in flags:\n f = Flags(type=\"static\", challenge_id=challenge.id, content=flag,)\n db.session.add(f)\n db.session.commit()\n\n if tags:\n tags = [tag.strip() for tag in tags.split(\",\")]\n for tag in tags:\n t = Tags(challenge_id=challenge.id, value=tag,)\n db.session.add(t)\n db.session.commit()\n\n if hints:\n hints = [hint.strip() for hint in hints.split(\",\")]\n for hint in hints:\n h = Hints(challenge_id=challenge.id, content=hint,)\n db.session.add(h)\n db.session.commit()\n return True\n\n\nCSV_KEYS = {\n \"scoreboard\": dump_scoreboard_csv,\n \"users+fields\": dump_users_with_fields_csv,\n \"teams+fields\": dump_teams_with_fields_csv,\n}\n", "path": "CTFd/utils/csv/__init__.py"}]}
num_tokens_prompt: 3172
num_tokens_diff: 156
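The change above guards `json.loads` against empty `type_data` cells in the imported CSV by falling back to `"{}"`. A tiny illustration of that guard follows; `parse_type_data` is a hypothetical helper name used only for this sketch, not part of CTFd:

```python
import json


def parse_type_data(raw):
    """Parse the optional `type_data` column from a challenge CSV row.

    csv.DictReader yields '' for empty cells (and None for missing columns),
    and json.loads('') raises a JSONDecodeError, so fall back to an empty dict.
    """
    return json.loads(raw or "{}")


assert parse_type_data(None) == {}
assert parse_type_data("") == {}
assert parse_type_data('{"initial": 500}') == {"initial": 500}
```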
problem_id: gh_patches_debug_22999
source: rasdani/github-patches
task_type: git_diff
in_source_id: cal-itp__benefits-924
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add verifier property to Events Soon users will have multiple pathways to verify eligibility, and it would be useful to know how many users are taking which pathway(s) in our analytics. We can get ahead of this now by adding an event property with the user's selected verifier (if any). Let's add this to the base Event definition so that we can later filter any event type by this property, similar to the Transit Agency information attached to each event. Linking this Amplitude reference again: https://help.amplitude.com/hc/en-us/articles/115002380567-User-properties-and-event-properties ## Acceptance Criteria <!-- Remember to consider edge cases --> - [ ] The base Event has an `eligibility_verifier` event property that gets its value from the user's `session.verifier()` - [ ] The Amplitude Tracking Plan is updated to accept this event property - [ ] The Amplitude --> Warehouse --> Metabase pipeline is updated to ingest this property ## Additional context This task was broken out from #342 and is focused on adding the verifier information to _existing_ events. #342 is now focused on introducing _new_ events for some forthcoming UI. </issue> <code> [start of benefits/core/analytics.py] 1 """ 2 The core application: analytics implementation. 3 """ 4 import itertools 5 import json 6 import logging 7 import re 8 import time 9 import uuid 10 11 from django.conf import settings 12 import requests 13 14 from benefits import VERSION 15 from . import session 16 17 18 logger = logging.getLogger(__name__) 19 20 21 class Event: 22 """Base analytics event of a given type, including attributes from request's session.""" 23 24 _counter = itertools.count() 25 _domain_re = re.compile(r"^(?:https?:\/\/)?(?:[^@\n]+@)?(?:www\.)?([^:\/\n?]+)", re.IGNORECASE) 26 27 def __init__(self, request, event_type, **kwargs): 28 self.app_version = VERSION 29 # device_id is generated based on the user_id, and both are set explicitly (per session) 30 self.device_id = session.did(request) 31 self.event_properties = {} 32 self.event_type = str(event_type).lower() 33 self.insert_id = str(uuid.uuid4()) 34 self.language = session.language(request) 35 # Amplitude tracks sessions using the start time as the session_id 36 self.session_id = session.start(request) 37 self.time = int(time.time() * 1000) 38 # Although Amplitude advises *against* setting user_id for anonymous users, here a value is set on anonymous 39 # users anyway, as the users never sign-in and become de-anonymized to this app / Amplitude. 
40 self.user_id = session.uid(request) 41 self.user_properties = {} 42 self.__dict__.update(kwargs) 43 44 agency = session.agency(request) 45 agency_name = agency.long_name if agency else None 46 47 self.update_event_properties(path=request.path, transit_agency=agency_name) 48 49 uagent = request.headers.get("user-agent") 50 51 ref = request.headers.get("referer") 52 match = Event._domain_re.match(ref) if ref else None 53 refdom = match.group(1) if match else None 54 55 self.update_user_properties(referrer=ref, referring_domain=refdom, user_agent=uagent, transit_agency=agency_name) 56 57 # event is initialized, consume next counter 58 self.event_id = next(Event._counter) 59 60 def __str__(self): 61 return json.dumps(self.__dict__) 62 63 def update_event_properties(self, **kwargs): 64 """Merge kwargs into the self.event_properties dict.""" 65 self.event_properties.update(kwargs) 66 67 def update_user_properties(self, **kwargs): 68 """Merge kwargs into the self.user_properties dict.""" 69 self.user_properties.update(kwargs) 70 71 72 class ViewedPageEvent(Event): 73 """Analytics event representing a single page view.""" 74 75 def __init__(self, request): 76 super().__init__(request, "viewed page") 77 78 79 class ChangedLanguageEvent(Event): 80 """Analytics event representing a change in the app's language.""" 81 82 def __init__(self, request, new_lang): 83 super().__init__(request, "changed language") 84 self.update_event_properties(language=new_lang) 85 86 87 class Client: 88 """Analytics API client""" 89 90 def __init__(self, api_key): 91 self.api_key = api_key 92 self.headers = {"Accept": "*/*", "Content-type": "application/json"} 93 self.url = "https://api2.amplitude.com/2/httpapi" 94 logger.debug(f"Initialize Client for {self.url}") 95 96 def _payload(self, events): 97 if not isinstance(events, list): 98 events = [events] 99 return {"api_key": self.api_key, "events": [e.__dict__ for e in events]} 100 101 def send(self, event): 102 """Send an analytics event.""" 103 if not isinstance(event, Event): 104 raise ValueError("event must be an Event instance") 105 106 if not self.api_key: 107 logger.warning(f"api_key is not configured, cannot send event: {event}") 108 return 109 110 try: 111 payload = self._payload(event) 112 logger.debug(f"Sending event payload: {payload}") 113 114 r = requests.post(self.url, headers=self.headers, json=payload) 115 if r.status_code == 200: 116 logger.debug(f"Event sent successfully: {r.json()}") 117 elif r.status_code == 400: 118 logger.error(f"Event request was invalid: {r.json()}") 119 elif r.status_code == 413: 120 logger.error(f"Event payload was too large: {r.json()}") 121 elif r.status_code == 429: 122 logger.error(f"Event contained too many requests for some users: {r.json()}") 123 else: 124 logger.error(f"Failed to send event: {r.json()}") 125 126 except Exception: 127 logger.error(f"Failed to send event: {event}") 128 129 130 client = Client(settings.ANALYTICS_KEY) 131 132 133 def send_event(event): 134 """Send an analytics event.""" 135 if isinstance(event, Event): 136 client.send(event) 137 else: 138 raise ValueError("event must be an Event instance") 139 [end of benefits/core/analytics.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/benefits/core/analytics.py b/benefits/core/analytics.py --- a/benefits/core/analytics.py +++ b/benefits/core/analytics.py @@ -43,8 +43,10 @@ agency = session.agency(request) agency_name = agency.long_name if agency else None + verifier = session.verifier(request) + verifier_name = verifier.name if verifier else None - self.update_event_properties(path=request.path, transit_agency=agency_name) + self.update_event_properties(path=request.path, transit_agency=agency_name, eligibility_verifier=verifier_name) uagent = request.headers.get("user-agent") @@ -52,7 +54,13 @@ match = Event._domain_re.match(ref) if ref else None refdom = match.group(1) if match else None - self.update_user_properties(referrer=ref, referring_domain=refdom, user_agent=uagent, transit_agency=agency_name) + self.update_user_properties( + referrer=ref, + referring_domain=refdom, + user_agent=uagent, + transit_agency=agency_name, + eligibility_verifier=verifier_name, + ) # event is initialized, consume next counter self.event_id = next(Event._counter)
{"golden_diff": "diff --git a/benefits/core/analytics.py b/benefits/core/analytics.py\n--- a/benefits/core/analytics.py\n+++ b/benefits/core/analytics.py\n@@ -43,8 +43,10 @@\n \n agency = session.agency(request)\n agency_name = agency.long_name if agency else None\n+ verifier = session.verifier(request)\n+ verifier_name = verifier.name if verifier else None\n \n- self.update_event_properties(path=request.path, transit_agency=agency_name)\n+ self.update_event_properties(path=request.path, transit_agency=agency_name, eligibility_verifier=verifier_name)\n \n uagent = request.headers.get(\"user-agent\")\n \n@@ -52,7 +54,13 @@\n match = Event._domain_re.match(ref) if ref else None\n refdom = match.group(1) if match else None\n \n- self.update_user_properties(referrer=ref, referring_domain=refdom, user_agent=uagent, transit_agency=agency_name)\n+ self.update_user_properties(\n+ referrer=ref,\n+ referring_domain=refdom,\n+ user_agent=uagent,\n+ transit_agency=agency_name,\n+ eligibility_verifier=verifier_name,\n+ )\n \n # event is initialized, consume next counter\n self.event_id = next(Event._counter)\n", "issue": "Add verifier property to Events\nSoon users will have multiple pathways to verify eligibility, and it would be useful to know how many users are taking which pathway(s) in our analytics. We can get ahead of this now by adding an event property with the user's selected verifier (if any).\r\n\r\nLet's add this to the base Event definition so that we can later filter any event type by this property, similar to the Transit Agency information attached to each event.\r\n\r\nLinking this Amplitude reference again: https://help.amplitude.com/hc/en-us/articles/115002380567-User-properties-and-event-properties\r\n\r\n## Acceptance Criteria\r\n\r\n<!-- Remember to consider edge cases -->\r\n\r\n- [ ] The base Event has an `eligibility_verifier` event property that gets its value from the user's `session.verifier()`\r\n- [ ] The Amplitude Tracking Plan is updated to accept this event property\r\n- [ ] The Amplitude --> Warehouse --> Metabase pipeline is updated to ingest this property\r\n\r\n## Additional context\r\n\r\nThis task was broken out from #342 and is focused on adding the verifier information to _existing_ events. #342 is now focused on introducing _new_ events for some forthcoming UI.\r\n\n", "before_files": [{"content": "\"\"\"\nThe core application: analytics implementation.\n\"\"\"\nimport itertools\nimport json\nimport logging\nimport re\nimport time\nimport uuid\n\nfrom django.conf import settings\nimport requests\n\nfrom benefits import VERSION\nfrom . 
import session\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Event:\n \"\"\"Base analytics event of a given type, including attributes from request's session.\"\"\"\n\n _counter = itertools.count()\n _domain_re = re.compile(r\"^(?:https?:\\/\\/)?(?:[^@\\n]+@)?(?:www\\.)?([^:\\/\\n?]+)\", re.IGNORECASE)\n\n def __init__(self, request, event_type, **kwargs):\n self.app_version = VERSION\n # device_id is generated based on the user_id, and both are set explicitly (per session)\n self.device_id = session.did(request)\n self.event_properties = {}\n self.event_type = str(event_type).lower()\n self.insert_id = str(uuid.uuid4())\n self.language = session.language(request)\n # Amplitude tracks sessions using the start time as the session_id\n self.session_id = session.start(request)\n self.time = int(time.time() * 1000)\n # Although Amplitude advises *against* setting user_id for anonymous users, here a value is set on anonymous\n # users anyway, as the users never sign-in and become de-anonymized to this app / Amplitude.\n self.user_id = session.uid(request)\n self.user_properties = {}\n self.__dict__.update(kwargs)\n\n agency = session.agency(request)\n agency_name = agency.long_name if agency else None\n\n self.update_event_properties(path=request.path, transit_agency=agency_name)\n\n uagent = request.headers.get(\"user-agent\")\n\n ref = request.headers.get(\"referer\")\n match = Event._domain_re.match(ref) if ref else None\n refdom = match.group(1) if match else None\n\n self.update_user_properties(referrer=ref, referring_domain=refdom, user_agent=uagent, transit_agency=agency_name)\n\n # event is initialized, consume next counter\n self.event_id = next(Event._counter)\n\n def __str__(self):\n return json.dumps(self.__dict__)\n\n def update_event_properties(self, **kwargs):\n \"\"\"Merge kwargs into the self.event_properties dict.\"\"\"\n self.event_properties.update(kwargs)\n\n def update_user_properties(self, **kwargs):\n \"\"\"Merge kwargs into the self.user_properties dict.\"\"\"\n self.user_properties.update(kwargs)\n\n\nclass ViewedPageEvent(Event):\n \"\"\"Analytics event representing a single page view.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"viewed page\")\n\n\nclass ChangedLanguageEvent(Event):\n \"\"\"Analytics event representing a change in the app's language.\"\"\"\n\n def __init__(self, request, new_lang):\n super().__init__(request, \"changed language\")\n self.update_event_properties(language=new_lang)\n\n\nclass Client:\n \"\"\"Analytics API client\"\"\"\n\n def __init__(self, api_key):\n self.api_key = api_key\n self.headers = {\"Accept\": \"*/*\", \"Content-type\": \"application/json\"}\n self.url = \"https://api2.amplitude.com/2/httpapi\"\n logger.debug(f\"Initialize Client for {self.url}\")\n\n def _payload(self, events):\n if not isinstance(events, list):\n events = [events]\n return {\"api_key\": self.api_key, \"events\": [e.__dict__ for e in events]}\n\n def send(self, event):\n \"\"\"Send an analytics event.\"\"\"\n if not isinstance(event, Event):\n raise ValueError(\"event must be an Event instance\")\n\n if not self.api_key:\n logger.warning(f\"api_key is not configured, cannot send event: {event}\")\n return\n\n try:\n payload = self._payload(event)\n logger.debug(f\"Sending event payload: {payload}\")\n\n r = requests.post(self.url, headers=self.headers, json=payload)\n if r.status_code == 200:\n logger.debug(f\"Event sent successfully: {r.json()}\")\n elif r.status_code == 400:\n logger.error(f\"Event request was invalid: 
{r.json()}\")\n elif r.status_code == 413:\n logger.error(f\"Event payload was too large: {r.json()}\")\n elif r.status_code == 429:\n logger.error(f\"Event contained too many requests for some users: {r.json()}\")\n else:\n logger.error(f\"Failed to send event: {r.json()}\")\n\n except Exception:\n logger.error(f\"Failed to send event: {event}\")\n\n\nclient = Client(settings.ANALYTICS_KEY)\n\n\ndef send_event(event):\n \"\"\"Send an analytics event.\"\"\"\n if isinstance(event, Event):\n client.send(event)\n else:\n raise ValueError(\"event must be an Event instance\")\n", "path": "benefits/core/analytics.py"}]}
2,156
293
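The accepted diff in the record above attaches the verifier name to both the event-properties and user-properties dicts of the analytics `Event`. A minimal sketch of that merge pattern follows; `StubVerifier` and `stub_verifier_lookup` are illustrative stand-ins for the app's real `session.verifier()` helper, not code from the benefits repository.

```python
# Sketch of the property merge the patch introduces; stub names below are
# assumptions for illustration only.
class StubVerifier:
    def __init__(self, name):
        self.name = name


def stub_verifier_lookup(request):
    # The real helper reads the user's selected verifier off the Django session.
    return StubVerifier("eligibility-verifier-a")


def build_event_and_user_properties(request, path, agency_name):
    verifier = stub_verifier_lookup(request)
    verifier_name = verifier.name if verifier else None
    event_properties = {
        "path": path,
        "transit_agency": agency_name,
        "eligibility_verifier": verifier_name,
    }
    user_properties = {
        "transit_agency": agency_name,
        "eligibility_verifier": verifier_name,
    }
    return event_properties, user_properties


print(build_event_and_user_properties(None, "/eligibility/confirm", "Example Agency"))
# -> both dicts carry eligibility_verifier='eligibility-verifier-a'
```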
gh_patches_debug_9450
rasdani/github-patches
git_diff
celery__celery-5720
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Getting error when write a unit test using PyTest for a celery task -> pytest.PytestUnknownMarkWarning: Unknown pytest.mark.celery I wrote the following bare minimum unit test class for celery ```python import pytest @pytest.fixture def celery_config(): return { "broker_url": "redis://localhost:6379/0", "result_backend": "redis://localhost:6379/0" } @pytest.mark.celery(result_backend="redis://") class GetHash: def test_some(self): pass ``` I am getting the following error when executing the test ``` test_get_hash.py:12: in <module> @pytest.mark.celery(result_backend="redis://") /home/work/.virtualenvs/dev_env/lib/python3.6/site-packages/_pytest/mark/structures.py:324: in __getattr__ PytestUnknownMarkWarning, E pytest.PytestUnknownMarkWarning: Unknown pytest.mark.celery - is this a typo? ``` These are the items in the `_mark` set `structures.py` file ```python <class 'set'>: { 'tryfirst', 'skip', 'black', 'filterwarnings', 'parametrize', 'usefixtures', 'skipif', 'xfail', 'no_cover', 'trylast' } ``` These are the installed python libraries ``` amqp==2.5.1 anyjson==0.3.3 apipkg==1.5 appdirs==1.4.3 atomicwrites==1.3.0 attrs==19.1.0 autoflake==1.3 Babel==2.7.0 bandit==1.6.2 billiard==3.6.1.0 black==19.3b0 celery==4.3.0 Cerberus==1.3.1 certifi==2019.6.16 chardet==3.0.4 checksumdir==1.1.6 Click==7.0 coverage==4.5.3 execnet==1.6.0 Flask==1.0.2 Flask-Cors==3.0.8 flower==0.9.3 gitdb2==2.0.5 GitPython==2.1.13 idna==2.8 importlib-metadata==0.19 isort==4.3.20 itsdangerous==1.1.0 Jinja2==2.10.1 kombu==4.6.4 MarkupSafe==1.1.1 mock==3.0.5 more-itertools==7.0.0 mysql-connector-python==8.0.16 Nuitka==0.6.5 packaging==19.1 pbr==5.4.2 pluggy==0.12.0 protobuf==3.7.1 py==1.8.0 pyflakes==2.1.1 pyparsing==2.4.2 pytest==5.1.1 pytest-black==0.3.7 pytest-cov==2.7.1 pytest-forked==1.0.2 pytest-runner==5.1 pytest-xdist==1.29.0 python-dateutil==2.8.0 python-dotenv==0.10.1 pytz==2019.2 PyYAML==5.1.2 redis==3.3.8 requests==2.22.0 rq==1.1.0 six==1.12.0 smmap2==2.0.5 SQLAlchemy==1.3.3 stevedore==1.30.1 toml==0.10.0 tornado==5.1.1 urllib3==1.25.3 vine==1.3.0 wcwidth==0.1.7 Werkzeug==0.15.2 ``` Is the documentation missing an additional package? </issue> <code> [start of celery/contrib/pytest.py] 1 """Fixtures and testing utilities for :pypi:`py.test <pytest>`.""" 2 from __future__ import absolute_import, unicode_literals 3 4 import os 5 from contextlib import contextmanager 6 7 import pytest 8 9 from .testing import worker 10 from .testing.app import TestApp, setup_default_app 11 12 NO_WORKER = os.environ.get('NO_WORKER') 13 14 # pylint: disable=redefined-outer-name 15 # Well, they're called fixtures.... 16 17 18 @contextmanager 19 def _create_app(enable_logging=False, 20 use_trap=False, 21 parameters=None, 22 **config): 23 # type: (Any, **Any) -> Celery 24 """Utility context used to setup Celery app for pytest fixtures.""" 25 parameters = {} if not parameters else parameters 26 test_app = TestApp( 27 set_as_current=False, 28 enable_logging=enable_logging, 29 config=config, 30 **parameters 31 ) 32 with setup_default_app(test_app, use_trap=use_trap): 33 yield test_app 34 35 36 @pytest.fixture(scope='session') 37 def use_celery_app_trap(): 38 # type: () -> bool 39 """You can override this fixture to enable the app trap. 40 41 The app trap raises an exception whenever something attempts 42 to use the current or default apps. 
43 """ 44 return False 45 46 47 @pytest.fixture(scope='session') 48 def celery_session_app(request, 49 celery_config, 50 celery_parameters, 51 celery_enable_logging, 52 use_celery_app_trap): 53 # type: (Any) -> Celery 54 """Session Fixture: Return app for session fixtures.""" 55 mark = request.node.get_closest_marker('celery') 56 config = dict(celery_config, **mark.kwargs if mark else {}) 57 with _create_app(enable_logging=celery_enable_logging, 58 use_trap=use_celery_app_trap, 59 parameters=celery_parameters, 60 **config) as app: 61 if not use_celery_app_trap: 62 app.set_default() 63 app.set_current() 64 yield app 65 66 67 @pytest.fixture(scope='session') 68 def celery_session_worker(request, 69 celery_session_app, 70 celery_includes, 71 celery_worker_pool, 72 celery_worker_parameters): 73 # type: (Any, Celery, Sequence[str], str) -> WorkController 74 """Session Fixture: Start worker that lives throughout test suite.""" 75 if not NO_WORKER: 76 for module in celery_includes: 77 celery_session_app.loader.import_task_module(module) 78 with worker.start_worker(celery_session_app, 79 pool=celery_worker_pool, 80 **celery_worker_parameters) as w: 81 yield w 82 83 84 @pytest.fixture(scope='session') 85 def celery_enable_logging(): 86 # type: () -> bool 87 """You can override this fixture to enable logging.""" 88 return False 89 90 91 @pytest.fixture(scope='session') 92 def celery_includes(): 93 # type: () -> Sequence[str] 94 """You can override this include modules when a worker start. 95 96 You can have this return a list of module names to import, 97 these can be task modules, modules registering signals, and so on. 98 """ 99 return () 100 101 102 @pytest.fixture(scope='session') 103 def celery_worker_pool(): 104 # type: () -> Union[str, Any] 105 """You can override this fixture to set the worker pool. 106 107 The "solo" pool is used by default, but you can set this to 108 return e.g. "prefork". 109 """ 110 return 'solo' 111 112 113 @pytest.fixture(scope='session') 114 def celery_config(): 115 # type: () -> Mapping[str, Any] 116 """Redefine this fixture to configure the test Celery app. 117 118 The config returned by your fixture will then be used 119 to configure the :func:`celery_app` fixture. 120 """ 121 return {} 122 123 124 @pytest.fixture(scope='session') 125 def celery_parameters(): 126 # type: () -> Mapping[str, Any] 127 """Redefine this fixture to change the init parameters of test Celery app. 128 129 The dict returned by your fixture will then be used 130 as parameters when instantiating :class:`~celery.Celery`. 131 """ 132 return {} 133 134 135 @pytest.fixture(scope='session') 136 def celery_worker_parameters(): 137 # type: () -> Mapping[str, Any] 138 """Redefine this fixture to change the init parameters of Celery workers. 139 140 This can be used e. g. to define queues the worker will consume tasks from. 141 142 The dict returned by your fixture will then be used 143 as parameters when instantiating :class:`~celery.worker.WorkController`. 
144 """ 145 return {} 146 147 148 @pytest.fixture() 149 def celery_app(request, 150 celery_config, 151 celery_parameters, 152 celery_enable_logging, 153 use_celery_app_trap): 154 """Fixture creating a Celery application instance.""" 155 mark = request.node.get_closest_marker('celery') 156 config = dict(celery_config, **mark.kwargs if mark else {}) 157 with _create_app(enable_logging=celery_enable_logging, 158 use_trap=use_celery_app_trap, 159 parameters=celery_parameters, 160 **config) as app: 161 yield app 162 163 164 @pytest.fixture() 165 def celery_worker(request, 166 celery_app, 167 celery_includes, 168 celery_worker_pool, 169 celery_worker_parameters): 170 # type: (Any, Celery, Sequence[str], str) -> WorkController 171 """Fixture: Start worker in a thread, stop it when the test returns.""" 172 if not NO_WORKER: 173 for module in celery_includes: 174 celery_app.loader.import_task_module(module) 175 with worker.start_worker(celery_app, 176 pool=celery_worker_pool, 177 **celery_worker_parameters) as w: 178 yield w 179 180 181 @pytest.fixture() 182 def depends_on_current_app(celery_app): 183 """Fixture that sets app as current.""" 184 celery_app.set_current() 185 [end of celery/contrib/pytest.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/celery/contrib/pytest.py b/celery/contrib/pytest.py --- a/celery/contrib/pytest.py +++ b/celery/contrib/pytest.py @@ -15,6 +15,16 @@ # Well, they're called fixtures.... +def pytest_configure(config): + """Register additional pytest configuration.""" + # add the pytest.mark.celery() marker registration to the pytest.ini [markers] section + # this prevents pytest 4.5 and newer from issueing a warning about an unknown marker + # and shows helpful marker documentation when running pytest --markers. + config.addinivalue_line( + "markers", "celery(**overrides): override celery configuration for a test case" + ) + + @contextmanager def _create_app(enable_logging=False, use_trap=False,
{"golden_diff": "diff --git a/celery/contrib/pytest.py b/celery/contrib/pytest.py\n--- a/celery/contrib/pytest.py\n+++ b/celery/contrib/pytest.py\n@@ -15,6 +15,16 @@\n # Well, they're called fixtures....\n \n \n+def pytest_configure(config):\n+ \"\"\"Register additional pytest configuration.\"\"\"\n+ # add the pytest.mark.celery() marker registration to the pytest.ini [markers] section\n+ # this prevents pytest 4.5 and newer from issueing a warning about an unknown marker\n+ # and shows helpful marker documentation when running pytest --markers.\n+ config.addinivalue_line(\n+ \"markers\", \"celery(**overrides): override celery configuration for a test case\"\n+ )\n+\n+\n @contextmanager\n def _create_app(enable_logging=False,\n use_trap=False,\n", "issue": "Getting error when write a unit test using PyTest for a celery task -> pytest.PytestUnknownMarkWarning: Unknown pytest.mark.celery\nI wrote the following bare minimum unit test class for celery\r\n\r\n```python\r\nimport pytest\r\n\r\n\r\[email protected]\r\ndef celery_config():\r\n return {\r\n \"broker_url\": \"redis://localhost:6379/0\",\r\n \"result_backend\": \"redis://localhost:6379/0\"\r\n }\r\n\r\n\r\[email protected](result_backend=\"redis://\")\r\nclass GetHash:\r\n\r\n def test_some(self):\r\n pass\r\n```\r\n\r\nI am getting the following error when executing the test\r\n```\r\ntest_get_hash.py:12: in <module>\r\n @pytest.mark.celery(result_backend=\"redis://\")\r\n/home/work/.virtualenvs/dev_env/lib/python3.6/site-packages/_pytest/mark/structures.py:324: in __getattr__\r\n PytestUnknownMarkWarning,\r\nE pytest.PytestUnknownMarkWarning: Unknown pytest.mark.celery - is this a typo?\r\n```\r\n\r\nThese are the items in the `_mark` set `structures.py` file \r\n```python\r\n<class 'set'>: {\r\n 'tryfirst', \r\n 'skip',\r\n 'black',\r\n 'filterwarnings',\r\n 'parametrize',\r\n 'usefixtures',\r\n 'skipif',\r\n 'xfail',\r\n 'no_cover',\r\n 'trylast'\r\n}\r\n```\r\n\r\nThese are the installed python libraries \r\n```\r\namqp==2.5.1\r\nanyjson==0.3.3\r\napipkg==1.5\r\nappdirs==1.4.3\r\natomicwrites==1.3.0\r\nattrs==19.1.0\r\nautoflake==1.3\r\nBabel==2.7.0\r\nbandit==1.6.2\r\nbilliard==3.6.1.0\r\nblack==19.3b0\r\ncelery==4.3.0\r\nCerberus==1.3.1\r\ncertifi==2019.6.16\r\nchardet==3.0.4\r\nchecksumdir==1.1.6\r\nClick==7.0\r\ncoverage==4.5.3\r\nexecnet==1.6.0\r\nFlask==1.0.2\r\nFlask-Cors==3.0.8\r\nflower==0.9.3\r\ngitdb2==2.0.5\r\nGitPython==2.1.13\r\nidna==2.8\r\nimportlib-metadata==0.19\r\nisort==4.3.20\r\nitsdangerous==1.1.0\r\nJinja2==2.10.1\r\nkombu==4.6.4\r\nMarkupSafe==1.1.1\r\nmock==3.0.5\r\nmore-itertools==7.0.0\r\nmysql-connector-python==8.0.16\r\nNuitka==0.6.5\r\npackaging==19.1\r\npbr==5.4.2\r\npluggy==0.12.0\r\nprotobuf==3.7.1\r\npy==1.8.0\r\npyflakes==2.1.1\r\npyparsing==2.4.2\r\npytest==5.1.1\r\npytest-black==0.3.7\r\npytest-cov==2.7.1\r\npytest-forked==1.0.2\r\npytest-runner==5.1\r\npytest-xdist==1.29.0\r\npython-dateutil==2.8.0\r\npython-dotenv==0.10.1\r\npytz==2019.2\r\nPyYAML==5.1.2\r\nredis==3.3.8\r\nrequests==2.22.0\r\nrq==1.1.0\r\nsix==1.12.0\r\nsmmap2==2.0.5\r\nSQLAlchemy==1.3.3\r\nstevedore==1.30.1\r\ntoml==0.10.0\r\ntornado==5.1.1\r\nurllib3==1.25.3\r\nvine==1.3.0\r\nwcwidth==0.1.7\r\nWerkzeug==0.15.2\r\n\r\n```\r\nIs the documentation missing an additional package?\n", "before_files": [{"content": "\"\"\"Fixtures and testing utilities for :pypi:`py.test <pytest>`.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\nfrom contextlib import contextmanager\n\nimport pytest\n\nfrom .testing 
import worker\nfrom .testing.app import TestApp, setup_default_app\n\nNO_WORKER = os.environ.get('NO_WORKER')\n\n# pylint: disable=redefined-outer-name\n# Well, they're called fixtures....\n\n\n@contextmanager\ndef _create_app(enable_logging=False,\n use_trap=False,\n parameters=None,\n **config):\n # type: (Any, **Any) -> Celery\n \"\"\"Utility context used to setup Celery app for pytest fixtures.\"\"\"\n parameters = {} if not parameters else parameters\n test_app = TestApp(\n set_as_current=False,\n enable_logging=enable_logging,\n config=config,\n **parameters\n )\n with setup_default_app(test_app, use_trap=use_trap):\n yield test_app\n\n\[email protected](scope='session')\ndef use_celery_app_trap():\n # type: () -> bool\n \"\"\"You can override this fixture to enable the app trap.\n\n The app trap raises an exception whenever something attempts\n to use the current or default apps.\n \"\"\"\n return False\n\n\[email protected](scope='session')\ndef celery_session_app(request,\n celery_config,\n celery_parameters,\n celery_enable_logging,\n use_celery_app_trap):\n # type: (Any) -> Celery\n \"\"\"Session Fixture: Return app for session fixtures.\"\"\"\n mark = request.node.get_closest_marker('celery')\n config = dict(celery_config, **mark.kwargs if mark else {})\n with _create_app(enable_logging=celery_enable_logging,\n use_trap=use_celery_app_trap,\n parameters=celery_parameters,\n **config) as app:\n if not use_celery_app_trap:\n app.set_default()\n app.set_current()\n yield app\n\n\[email protected](scope='session')\ndef celery_session_worker(request,\n celery_session_app,\n celery_includes,\n celery_worker_pool,\n celery_worker_parameters):\n # type: (Any, Celery, Sequence[str], str) -> WorkController\n \"\"\"Session Fixture: Start worker that lives throughout test suite.\"\"\"\n if not NO_WORKER:\n for module in celery_includes:\n celery_session_app.loader.import_task_module(module)\n with worker.start_worker(celery_session_app,\n pool=celery_worker_pool,\n **celery_worker_parameters) as w:\n yield w\n\n\[email protected](scope='session')\ndef celery_enable_logging():\n # type: () -> bool\n \"\"\"You can override this fixture to enable logging.\"\"\"\n return False\n\n\[email protected](scope='session')\ndef celery_includes():\n # type: () -> Sequence[str]\n \"\"\"You can override this include modules when a worker start.\n\n You can have this return a list of module names to import,\n these can be task modules, modules registering signals, and so on.\n \"\"\"\n return ()\n\n\[email protected](scope='session')\ndef celery_worker_pool():\n # type: () -> Union[str, Any]\n \"\"\"You can override this fixture to set the worker pool.\n\n The \"solo\" pool is used by default, but you can set this to\n return e.g. 
\"prefork\".\n \"\"\"\n return 'solo'\n\n\[email protected](scope='session')\ndef celery_config():\n # type: () -> Mapping[str, Any]\n \"\"\"Redefine this fixture to configure the test Celery app.\n\n The config returned by your fixture will then be used\n to configure the :func:`celery_app` fixture.\n \"\"\"\n return {}\n\n\[email protected](scope='session')\ndef celery_parameters():\n # type: () -> Mapping[str, Any]\n \"\"\"Redefine this fixture to change the init parameters of test Celery app.\n\n The dict returned by your fixture will then be used\n as parameters when instantiating :class:`~celery.Celery`.\n \"\"\"\n return {}\n\n\[email protected](scope='session')\ndef celery_worker_parameters():\n # type: () -> Mapping[str, Any]\n \"\"\"Redefine this fixture to change the init parameters of Celery workers.\n\n This can be used e. g. to define queues the worker will consume tasks from.\n\n The dict returned by your fixture will then be used\n as parameters when instantiating :class:`~celery.worker.WorkController`.\n \"\"\"\n return {}\n\n\[email protected]()\ndef celery_app(request,\n celery_config,\n celery_parameters,\n celery_enable_logging,\n use_celery_app_trap):\n \"\"\"Fixture creating a Celery application instance.\"\"\"\n mark = request.node.get_closest_marker('celery')\n config = dict(celery_config, **mark.kwargs if mark else {})\n with _create_app(enable_logging=celery_enable_logging,\n use_trap=use_celery_app_trap,\n parameters=celery_parameters,\n **config) as app:\n yield app\n\n\[email protected]()\ndef celery_worker(request,\n celery_app,\n celery_includes,\n celery_worker_pool,\n celery_worker_parameters):\n # type: (Any, Celery, Sequence[str], str) -> WorkController\n \"\"\"Fixture: Start worker in a thread, stop it when the test returns.\"\"\"\n if not NO_WORKER:\n for module in celery_includes:\n celery_app.loader.import_task_module(module)\n with worker.start_worker(celery_app,\n pool=celery_worker_pool,\n **celery_worker_parameters) as w:\n yield w\n\n\[email protected]()\ndef depends_on_current_app(celery_app):\n \"\"\"Fixture that sets app as current.\"\"\"\n celery_app.set_current()\n", "path": "celery/contrib/pytest.py"}]}
3,144
188
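The celery record above is resolved by registering the `celery` marker from the plugin's `pytest_configure` hook so pytest 4.5+ stops emitting `PytestUnknownMarkWarning`. The same registration can also be done locally while waiting on a fixed release; the snippet below is a generic sketch of that option, not code taken from celery itself.

```python
# conftest.py -- registers the marker for the local test suite so pytest
# no longer warns about pytest.mark.celery being unknown.
def pytest_configure(config):
    config.addinivalue_line(
        "markers",
        "celery(**overrides): override celery configuration for a test case",
    )

# Equivalent declaration in pytest.ini:
# [pytest]
# markers =
#     celery(**overrides): override celery configuration for a test case
```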
gh_patches_debug_19565
rasdani/github-patches
git_diff
certbot__certbot-1591
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Log when simple-verify fails Half #1409. When we fail to self verify a challenge, we should log the problem and continue instead of exiting. </issue> <code> [start of letsencrypt/plugins/manual.py] 1 """Manual plugin.""" 2 import os 3 import logging 4 import pipes 5 import shutil 6 import signal 7 import socket 8 import subprocess 9 import sys 10 import tempfile 11 import time 12 13 import zope.component 14 import zope.interface 15 16 from acme import challenges 17 18 from letsencrypt import errors 19 from letsencrypt import interfaces 20 from letsencrypt.plugins import common 21 22 23 logger = logging.getLogger(__name__) 24 25 26 class Authenticator(common.Plugin): 27 """Manual Authenticator. 28 29 This plugin requires user's manual intervention in setting up a HTTP 30 server for solving http-01 challenges and thus does not need to be 31 run as a privileged process. Alternatively shows instructions on how 32 to use Python's built-in HTTP server. 33 34 .. todo:: Support for `~.challenges.TLSSNI01`. 35 36 """ 37 zope.interface.implements(interfaces.IAuthenticator) 38 zope.interface.classProvides(interfaces.IPluginFactory) 39 hidden = True 40 41 description = "Manually configure an HTTP server" 42 43 MESSAGE_TEMPLATE = """\ 44 Make sure your web server displays the following content at 45 {uri} before continuing: 46 47 {validation} 48 49 Content-Type header MUST be set to {ct}. 50 51 If you don't have HTTP server configured, you can run the following 52 command on the target server (as root): 53 54 {command} 55 """ 56 57 # a disclaimer about your current IP being transmitted to Let's Encrypt's servers. 58 IP_DISCLAIMER = """\ 59 NOTE: The IP of this machine will be publicly logged as having requested this certificate. \ 60 If you're running letsencrypt in manual mode on a machine that is not your server, \ 61 please ensure you're okay with that. 62 63 Are you OK with your IP being logged? 64 """ 65 66 # "cd /tmp/letsencrypt" makes sure user doesn't serve /root, 67 # separate "public_html" ensures that cert.pem/key.pem are not 68 # served and makes it more obvious that Python command will serve 69 # anything recursively under the cwd 70 71 CMD_TEMPLATE = """\ 72 mkdir -p {root}/public_html/{achall.URI_ROOT_PATH} 73 cd {root}/public_html 74 printf "%s" {validation} > {achall.URI_ROOT_PATH}/{encoded_token} 75 # run only once per server: 76 $(command -v python2 || command -v python2.7 || command -v python2.6) -c \\ 77 "import BaseHTTPServer, SimpleHTTPServer; \\ 78 SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map = {{'': '{ct}'}}; \\ 79 s = BaseHTTPServer.HTTPServer(('', {port}), SimpleHTTPServer.SimpleHTTPRequestHandler); \\ 80 s.serve_forever()" """ 81 """Command template.""" 82 83 def __init__(self, *args, **kwargs): 84 super(Authenticator, self).__init__(*args, **kwargs) 85 self._root = (tempfile.mkdtemp() if self.conf("test-mode") 86 else "/tmp/letsencrypt") 87 self._httpd = None 88 89 @classmethod 90 def add_parser_arguments(cls, add): 91 add("test-mode", action="store_true", 92 help="Test mode. 
Executes the manual command in subprocess.") 93 94 def prepare(self): # pylint: disable=missing-docstring,no-self-use 95 pass # pragma: no cover 96 97 def more_info(self): # pylint: disable=missing-docstring,no-self-use 98 return ("This plugin requires user's manual intervention in setting " 99 "up an HTTP server for solving http-01 challenges and thus " 100 "does not need to be run as a privileged process. " 101 "Alternatively shows instructions on how to use Python's " 102 "built-in HTTP server.") 103 104 def get_chall_pref(self, domain): 105 # pylint: disable=missing-docstring,no-self-use,unused-argument 106 return [challenges.HTTP01] 107 108 def perform(self, achalls): # pylint: disable=missing-docstring 109 responses = [] 110 # TODO: group achalls by the same socket.gethostbyname(_ex) 111 # and prompt only once per server (one "echo -n" per domain) 112 for achall in achalls: 113 responses.append(self._perform_single(achall)) 114 return responses 115 116 @classmethod 117 def _test_mode_busy_wait(cls, port): 118 while True: 119 time.sleep(1) 120 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 121 try: 122 sock.connect(("localhost", port)) 123 except socket.error: # pragma: no cover 124 pass 125 else: 126 break 127 finally: 128 sock.close() 129 130 def _perform_single(self, achall): 131 # same path for each challenge response would be easier for 132 # users, but will not work if multiple domains point at the 133 # same server: default command doesn't support virtual hosts 134 response, validation = achall.response_and_validation() 135 136 port = (response.port if self.config.http01_port is None 137 else int(self.config.http01_port)) 138 command = self.CMD_TEMPLATE.format( 139 root=self._root, achall=achall, response=response, 140 # TODO(kuba): pipes still necessary? 141 validation=pipes.quote(validation), 142 encoded_token=achall.chall.encode("token"), 143 ct=achall.CONTENT_TYPE, port=port) 144 if self.conf("test-mode"): 145 logger.debug("Test mode. Executing the manual command: %s", command) 146 # sh shipped with OS X does't support echo -n, but supports printf 147 try: 148 self._httpd = subprocess.Popen( 149 command, 150 # don't care about setting stdout and stderr, 151 # we're in test mode anyway 152 shell=True, 153 executable=None, 154 # "preexec_fn" is UNIX specific, but so is "command" 155 preexec_fn=os.setsid) 156 except OSError as error: # ValueError should not happen! 
157 logger.debug( 158 "Couldn't execute manual command: %s", error, exc_info=True) 159 return False 160 logger.debug("Manual command running as PID %s.", self._httpd.pid) 161 # give it some time to bootstrap, before we try to verify 162 # (cert generation in case of simpleHttpS might take time) 163 self._test_mode_busy_wait(port) 164 if self._httpd.poll() is not None: 165 raise errors.Error("Couldn't execute manual command") 166 else: 167 if not zope.component.getUtility(interfaces.IDisplay).yesno( 168 self.IP_DISCLAIMER, "Yes", "No"): 169 raise errors.PluginError("Must agree to IP logging to proceed") 170 171 self._notify_and_wait(self.MESSAGE_TEMPLATE.format( 172 validation=validation, response=response, 173 uri=achall.chall.uri(achall.domain), 174 ct=achall.CONTENT_TYPE, command=command)) 175 176 if response.simple_verify( 177 achall.chall, achall.domain, 178 achall.account_key.public_key(), self.config.http01_port): 179 return response 180 else: 181 logger.error( 182 "Self-verify of challenge failed, authorization abandoned.") 183 if self.conf("test-mode") and self._httpd.poll() is not None: 184 # simply verify cause command failure... 185 return False 186 return None 187 188 def _notify_and_wait(self, message): # pylint: disable=no-self-use 189 # TODO: IDisplay wraps messages, breaking the command 190 #answer = zope.component.getUtility(interfaces.IDisplay).notification( 191 # message=message, height=25, pause=True) 192 sys.stdout.write(message) 193 raw_input("Press ENTER to continue") 194 195 def cleanup(self, achalls): 196 # pylint: disable=missing-docstring,no-self-use,unused-argument 197 if self.conf("test-mode"): 198 assert self._httpd is not None, ( 199 "cleanup() must be called after perform()") 200 if self._httpd.poll() is None: 201 logger.debug("Terminating manual command process") 202 os.killpg(self._httpd.pid, signal.SIGTERM) 203 else: 204 logger.debug("Manual command process already terminated " 205 "with %s code", self._httpd.returncode) 206 shutil.rmtree(self._root) 207 [end of letsencrypt/plugins/manual.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/letsencrypt/plugins/manual.py b/letsencrypt/plugins/manual.py --- a/letsencrypt/plugins/manual.py +++ b/letsencrypt/plugins/manual.py @@ -173,17 +173,12 @@ uri=achall.chall.uri(achall.domain), ct=achall.CONTENT_TYPE, command=command)) - if response.simple_verify( + if not response.simple_verify( achall.chall, achall.domain, achall.account_key.public_key(), self.config.http01_port): - return response - else: - logger.error( - "Self-verify of challenge failed, authorization abandoned.") - if self.conf("test-mode") and self._httpd.poll() is not None: - # simply verify cause command failure... - return False - return None + logger.warning("Self-verify of challenge failed.") + + return response def _notify_and_wait(self, message): # pylint: disable=no-self-use # TODO: IDisplay wraps messages, breaking the command
{"golden_diff": "diff --git a/letsencrypt/plugins/manual.py b/letsencrypt/plugins/manual.py\n--- a/letsencrypt/plugins/manual.py\n+++ b/letsencrypt/plugins/manual.py\n@@ -173,17 +173,12 @@\n uri=achall.chall.uri(achall.domain),\n ct=achall.CONTENT_TYPE, command=command))\n \n- if response.simple_verify(\n+ if not response.simple_verify(\n achall.chall, achall.domain,\n achall.account_key.public_key(), self.config.http01_port):\n- return response\n- else:\n- logger.error(\n- \"Self-verify of challenge failed, authorization abandoned.\")\n- if self.conf(\"test-mode\") and self._httpd.poll() is not None:\n- # simply verify cause command failure...\n- return False\n- return None\n+ logger.warning(\"Self-verify of challenge failed.\")\n+\n+ return response\n \n def _notify_and_wait(self, message): # pylint: disable=no-self-use\n # TODO: IDisplay wraps messages, breaking the command\n", "issue": "Log when simple-verify fails\nHalf #1409. When we fail to self verify a challenge, we should log the problem and continue instead of exiting.\n\n", "before_files": [{"content": "\"\"\"Manual plugin.\"\"\"\nimport os\nimport logging\nimport pipes\nimport shutil\nimport signal\nimport socket\nimport subprocess\nimport sys\nimport tempfile\nimport time\n\nimport zope.component\nimport zope.interface\n\nfrom acme import challenges\n\nfrom letsencrypt import errors\nfrom letsencrypt import interfaces\nfrom letsencrypt.plugins import common\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Authenticator(common.Plugin):\n \"\"\"Manual Authenticator.\n\n This plugin requires user's manual intervention in setting up a HTTP\n server for solving http-01 challenges and thus does not need to be\n run as a privileged process. Alternatively shows instructions on how\n to use Python's built-in HTTP server.\n\n .. todo:: Support for `~.challenges.TLSSNI01`.\n\n \"\"\"\n zope.interface.implements(interfaces.IAuthenticator)\n zope.interface.classProvides(interfaces.IPluginFactory)\n hidden = True\n\n description = \"Manually configure an HTTP server\"\n\n MESSAGE_TEMPLATE = \"\"\"\\\nMake sure your web server displays the following content at\n{uri} before continuing:\n\n{validation}\n\nContent-Type header MUST be set to {ct}.\n\nIf you don't have HTTP server configured, you can run the following\ncommand on the target server (as root):\n\n{command}\n\"\"\"\n\n # a disclaimer about your current IP being transmitted to Let's Encrypt's servers.\n IP_DISCLAIMER = \"\"\"\\\nNOTE: The IP of this machine will be publicly logged as having requested this certificate. 
\\\nIf you're running letsencrypt in manual mode on a machine that is not your server, \\\nplease ensure you're okay with that.\n\nAre you OK with your IP being logged?\n\"\"\"\n\n # \"cd /tmp/letsencrypt\" makes sure user doesn't serve /root,\n # separate \"public_html\" ensures that cert.pem/key.pem are not\n # served and makes it more obvious that Python command will serve\n # anything recursively under the cwd\n\n CMD_TEMPLATE = \"\"\"\\\nmkdir -p {root}/public_html/{achall.URI_ROOT_PATH}\ncd {root}/public_html\nprintf \"%s\" {validation} > {achall.URI_ROOT_PATH}/{encoded_token}\n# run only once per server:\n$(command -v python2 || command -v python2.7 || command -v python2.6) -c \\\\\n\"import BaseHTTPServer, SimpleHTTPServer; \\\\\nSimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map = {{'': '{ct}'}}; \\\\\ns = BaseHTTPServer.HTTPServer(('', {port}), SimpleHTTPServer.SimpleHTTPRequestHandler); \\\\\ns.serve_forever()\" \"\"\"\n \"\"\"Command template.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Authenticator, self).__init__(*args, **kwargs)\n self._root = (tempfile.mkdtemp() if self.conf(\"test-mode\")\n else \"/tmp/letsencrypt\")\n self._httpd = None\n\n @classmethod\n def add_parser_arguments(cls, add):\n add(\"test-mode\", action=\"store_true\",\n help=\"Test mode. Executes the manual command in subprocess.\")\n\n def prepare(self): # pylint: disable=missing-docstring,no-self-use\n pass # pragma: no cover\n\n def more_info(self): # pylint: disable=missing-docstring,no-self-use\n return (\"This plugin requires user's manual intervention in setting \"\n \"up an HTTP server for solving http-01 challenges and thus \"\n \"does not need to be run as a privileged process. \"\n \"Alternatively shows instructions on how to use Python's \"\n \"built-in HTTP server.\")\n\n def get_chall_pref(self, domain):\n # pylint: disable=missing-docstring,no-self-use,unused-argument\n return [challenges.HTTP01]\n\n def perform(self, achalls): # pylint: disable=missing-docstring\n responses = []\n # TODO: group achalls by the same socket.gethostbyname(_ex)\n # and prompt only once per server (one \"echo -n\" per domain)\n for achall in achalls:\n responses.append(self._perform_single(achall))\n return responses\n\n @classmethod\n def _test_mode_busy_wait(cls, port):\n while True:\n time.sleep(1)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n sock.connect((\"localhost\", port))\n except socket.error: # pragma: no cover\n pass\n else:\n break\n finally:\n sock.close()\n\n def _perform_single(self, achall):\n # same path for each challenge response would be easier for\n # users, but will not work if multiple domains point at the\n # same server: default command doesn't support virtual hosts\n response, validation = achall.response_and_validation()\n\n port = (response.port if self.config.http01_port is None\n else int(self.config.http01_port))\n command = self.CMD_TEMPLATE.format(\n root=self._root, achall=achall, response=response,\n # TODO(kuba): pipes still necessary?\n validation=pipes.quote(validation),\n encoded_token=achall.chall.encode(\"token\"),\n ct=achall.CONTENT_TYPE, port=port)\n if self.conf(\"test-mode\"):\n logger.debug(\"Test mode. 
Executing the manual command: %s\", command)\n # sh shipped with OS X does't support echo -n, but supports printf\n try:\n self._httpd = subprocess.Popen(\n command,\n # don't care about setting stdout and stderr,\n # we're in test mode anyway\n shell=True,\n executable=None,\n # \"preexec_fn\" is UNIX specific, but so is \"command\"\n preexec_fn=os.setsid)\n except OSError as error: # ValueError should not happen!\n logger.debug(\n \"Couldn't execute manual command: %s\", error, exc_info=True)\n return False\n logger.debug(\"Manual command running as PID %s.\", self._httpd.pid)\n # give it some time to bootstrap, before we try to verify\n # (cert generation in case of simpleHttpS might take time)\n self._test_mode_busy_wait(port)\n if self._httpd.poll() is not None:\n raise errors.Error(\"Couldn't execute manual command\")\n else:\n if not zope.component.getUtility(interfaces.IDisplay).yesno(\n self.IP_DISCLAIMER, \"Yes\", \"No\"):\n raise errors.PluginError(\"Must agree to IP logging to proceed\")\n\n self._notify_and_wait(self.MESSAGE_TEMPLATE.format(\n validation=validation, response=response,\n uri=achall.chall.uri(achall.domain),\n ct=achall.CONTENT_TYPE, command=command))\n\n if response.simple_verify(\n achall.chall, achall.domain,\n achall.account_key.public_key(), self.config.http01_port):\n return response\n else:\n logger.error(\n \"Self-verify of challenge failed, authorization abandoned.\")\n if self.conf(\"test-mode\") and self._httpd.poll() is not None:\n # simply verify cause command failure...\n return False\n return None\n\n def _notify_and_wait(self, message): # pylint: disable=no-self-use\n # TODO: IDisplay wraps messages, breaking the command\n #answer = zope.component.getUtility(interfaces.IDisplay).notification(\n # message=message, height=25, pause=True)\n sys.stdout.write(message)\n raw_input(\"Press ENTER to continue\")\n\n def cleanup(self, achalls):\n # pylint: disable=missing-docstring,no-self-use,unused-argument\n if self.conf(\"test-mode\"):\n assert self._httpd is not None, (\n \"cleanup() must be called after perform()\")\n if self._httpd.poll() is None:\n logger.debug(\"Terminating manual command process\")\n os.killpg(self._httpd.pid, signal.SIGTERM)\n else:\n logger.debug(\"Manual command process already terminated \"\n \"with %s code\", self._httpd.returncode)\n shutil.rmtree(self._root)\n", "path": "letsencrypt/plugins/manual.py"}]}
2,820
234
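For the certbot record above, the fix swaps the hard failure after `simple_verify` for a logged warning and still returns the response. A stripped-down sketch of that warn-and-continue shape is below; the function and argument names are placeholders, not certbot's API.

```python
import logging

logger = logging.getLogger(__name__)


def complete_challenge(response, self_verified):
    # Placeholder shape: log the failed self-check but hand the response back
    # anyway so the ACME server can still attempt its own validation.
    if not self_verified:
        logger.warning("Self-verify of challenge failed.")
    return response
```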
gh_patches_debug_2285
rasdani/github-patches
git_diff
jupyterhub__zero-to-jupyterhub-k8s-1710
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CI: linkcheck of our docs fails ### Bug description Our test fails [like this](https://travis-ci.org/github/jupyterhub/zero-to-jupyterhub-k8s/jobs/699287339), which is initialized as described in `.travis.yml` under `docs:linkcheck`. ``` /home/travis/build/jupyterhub/zero-to-jupyterhub-k8s/doc/source/customizing/user-environment.rst:4: WARNING: Duplicate explicit target name: "jupyterlab". ``` ### How to reproduce I think something like this, but check `.travis.yml` for details on how it is done there. - Clone repo, enter the `doc/` folder. - Install doc-requirements.txt with pip. - `make linkcheck` </issue> <code> [start of doc/source/conf.py] 1 2 # Configuration file for the Sphinx documentation builder. 3 # 4 # This file only contains a selection of the most common options. For a full 5 # list see the documentation: 6 # https://www.sphinx-doc.org/en/master/usage/configuration.html 7 8 # -- Path setup -------------------------------------------------------------- 9 10 # If extensions (or modules to document with autodoc) are in another directory, 11 # add these directories to sys.path here. If the directory is relative to the 12 # documentation root, use os.path.abspath to make it absolute, like shown here. 13 # 14 # import os 15 # import sys 16 # sys.path.insert(0, os.path.abspath('.')) 17 18 19 # -- Project specific imports ------------------------------------------------ 20 21 from datetime import date 22 23 import yaml 24 25 26 # -- Sphinx setup function --------------------------------------------------- 27 # ref: http://www.sphinx-doc.org/en/latest/extdev/tutorial.html#the-setup-function 28 29 def setup(app): 30 app.add_css_file('custom.css') 31 32 33 # -- Project information ----------------------------------------------------- 34 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#project-information 35 36 project = 'Zero to JupyterHub with Kubernetes' 37 copyright = '{year}, Project Jupyter Contributors'.format(year=date.today().year) 38 author = 'Project Jupyter Contributors' 39 40 # The version info for the project you're documenting, acts as replacement for 41 # |version| and |release|, also used in various other places throughout the 42 # built documents. 43 # 44 with open('../../jupyterhub/Chart.yaml') as f: 45 chart = yaml.safe_load(f) 46 version = chart['version'].split('-', 1)[0] 47 release = chart['version'] 48 49 # Project specific variables 50 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#confval-rst_epilog 51 rst_epilog = """ 52 .. |hub_version| replace:: {v} 53 """.format(v=chart['appVersion']) 54 55 56 # -- General configuration --------------------------------------------------- 57 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#general-configuration 58 59 # Set the default role so we can use `foo` instead of ``foo`` 60 default_role = 'literal' 61 62 # Add any Sphinx extension module names here, as strings. They can be 63 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 64 # ones. 65 extensions = ['sphinx.ext.mathjax', 66 'sphinx_copybutton', 67 'myst_parser'] 68 69 # List of patterns, relative to source directory, that match files and 70 # directories to ignore when looking for source files. 71 # This pattern also affects html_static_path and html_extra_path. 72 exclude_patterns = [] 73 74 # The master toctree document. 
75 master_doc = 'index' 76 77 # The suffix(es) of source filenames. 78 source_suffix = ['.rst', '.md'] 79 80 # Add any paths that contain templates here, relative to this directory. 81 templates_path = ['_templates'] 82 83 84 # -- Generate the Helm chart configuration reference from a schema file ------ 85 86 # header 87 with open('reference/reference.txt', 'r') as f: 88 header_md = f.readlines() 89 header_md = header_md[1:] 90 header_md = [ln.strip('\n') for ln in header_md] 91 92 # schema 93 with open('../../jupyterhub/schema.yaml', 'r') as f: 94 data = yaml.safe_load(f) 95 def parse_schema(d, md=[], depth=0, pre=''): 96 """ 97 Generate markdown headers from a passed python dictionary created by 98 parsing a schema.yaml file. 99 """ 100 if 'properties' in d: 101 depth += 1 102 # Create markdown headers for each schema level 103 for key, val in d['properties'].items(): 104 md.append('#'*(depth + 1) + ' ' + pre + key) 105 md.append('') 106 if 'description' in val: 107 for ln in val['description'].split('\n'): 108 md.append(ln) 109 md.append('') 110 111 parse_schema(val, md, depth, pre+'{}.'.format(key)) 112 depth -= 1 113 return md 114 schema_md = parse_schema(data) 115 116 # reference = header + schema 117 reference_md = header_md + schema_md 118 with open('reference/reference.md', 'w') as f: 119 f.write('\n'.join(reference_md)) 120 121 122 # -- Options for linkcheck builder ------------------------------------------- 123 # ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder 124 linkcheck_ignore = [ 125 r'(.*)github\.com(.*)#', # javascript based anchors 126 r'(.*)/#%21(.*)/(.*)', # /#!forum/jupyter - encoded anchor edge case 127 "https://your-domain.com", # example 128 "https://your-domain-name.com", # example 129 "https://kubernetes.io/docs/tutorials/kubernetes-basics/", # works 130 "https://cloud.ibm.com/kubernetes/catalog/create", # works 131 ] 132 linkcheck_anchors_ignore = [ 133 "/#!", 134 "/#%21", 135 ] 136 137 138 # -- Options for HTML output ------------------------------------------------- 139 # ref: http://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-output 140 141 # The theme to use for HTML and HTML Help pages. See the documentation for 142 # a list of builtin themes. 143 # 144 html_theme = 'pydata_sphinx_theme' 145 html_favicon = '_static/images/logo/favicon.ico' 146 html_logo = '_static/images/logo/logo.png' 147 148 # Add any paths that contain custom static files (such as style sheets) here, 149 # relative to this directory. They are copied after the builtin static files, 150 # so a file named "default.css" will overwrite the builtin "default.css". 151 html_static_path = ['_static'] 152 153 154 155 156 # -- Below are options for more esoteric output ------------------------------- 157 # ----------------------------------------------------------------------------- 158 159 160 # -- Options for HTML help output --------------------------------------------- 161 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-help-output 162 163 # Output file base name for HTML help builder. 164 htmlhelp_basename = 'ZeroToJupyterhubDoc' 165 166 167 # -- Options for LaTeX output ------------------------------------------------ 168 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-latex-output 169 170 latex_elements = { 171 # The paper size ('letterpaper' or 'a4paper'). 
172 # 173 # 'papersize': 'letterpaper', 174 175 # The font size ('10pt', '11pt' or '12pt'). 176 # 177 # 'pointsize': '10pt', 178 179 # Additional stuff for the LaTeX preamble. 180 # 181 # 'preamble': '', 182 183 # Latex figure (float) alignment 184 # 185 # 'figure_align': 'htbp', 186 } 187 188 # Grouping the document tree into LaTeX files. 189 latex_documents = [ 190 ( 191 master_doc, # source start file 192 'ZeroToJupyterhubDoc.tex', # target name 193 'Zero to JupyterHub', # title 194 author, # author 195 'manual' # documentclass [howto, manual, or own class] 196 ), 197 ] 198 199 200 # -- Options for manual page output ------------------------------------------ 201 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-manual-page-output 202 203 # One entry per manual page. 204 man_pages = [ 205 ( 206 master_doc, # source start file 207 'zerotojupyterhub', # name 208 'Zero to JupyterHub', # description 209 [author], # authors 210 1, # manual section 211 ), 212 ] 213 214 215 # -- Options for Texinfo output ---------------------------------------------- 216 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-texinfo-output 217 218 # Grouping the document tree into Texinfo files. 219 texinfo_documents = [ 220 ( 221 master_doc, # source start file 222 'ZeroToJupyterhubDoc', # target name 223 'Zero to JupyterHub', # title 224 author, # author 225 'ZeroToJupyterhubDoc', # dir menu entry 226 'One line description of project.', # description 227 'Miscellaneous' # category 228 ), 229 ] 230 231 232 # -- Options for epub output ------------------------------------------------- 233 # ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-epub-output 234 235 # Bibliographic Dublin Core info. 236 epub_title = project 237 epub_author = author 238 epub_publisher = author 239 epub_copyright = copyright 240 241 # The unique identifier of the text. This can be a ISBN number 242 # or the project homepage. 243 # 244 # epub_identifier = '' 245 246 # A unique identification for the text. 247 # 248 # epub_uid = '' 249 250 # A list of files that should not be packed into the epub file. 251 epub_exclude_files = ['search.html'] 252 [end of doc/source/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/doc/source/conf.py b/doc/source/conf.py --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -77,9 +77,6 @@ # The suffix(es) of source filenames. source_suffix = ['.rst', '.md'] -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - # -- Generate the Helm chart configuration reference from a schema file ------
{"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -77,9 +77,6 @@\n # The suffix(es) of source filenames.\n source_suffix = ['.rst', '.md']\n \n-# Add any paths that contain templates here, relative to this directory.\n-templates_path = ['_templates']\n-\n \n # -- Generate the Helm chart configuration reference from a schema file ------\n", "issue": "CI: linkcheck of our docs fails\n### Bug description\r\nOur test fails [like this](https://travis-ci.org/github/jupyterhub/zero-to-jupyterhub-k8s/jobs/699287339), which is initialized as described in `.travis.yml` under `docs:linkcheck`.\r\n\r\n```\r\n/home/travis/build/jupyterhub/zero-to-jupyterhub-k8s/doc/source/customizing/user-environment.rst:4: WARNING: Duplicate explicit target name: \"jupyterlab\".\r\n```\r\n\r\n### How to reproduce\r\n\r\nI think something like this, but check `.travis.yml` for details on how it is done there.\r\n\r\n- Clone repo, enter the `doc/` folder.\r\n- Install doc-requirements.txt with pip.\r\n- `make linkcheck`\n", "before_files": [{"content": "\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- Project specific imports ------------------------------------------------\n\nfrom datetime import date\n\nimport yaml\n\n\n# -- Sphinx setup function ---------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/latest/extdev/tutorial.html#the-setup-function\n\ndef setup(app):\n app.add_css_file('custom.css')\n\n\n# -- Project information -----------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#project-information\n\nproject = 'Zero to JupyterHub with Kubernetes'\ncopyright = '{year}, Project Jupyter Contributors'.format(year=date.today().year)\nauthor = 'Project Jupyter Contributors'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nwith open('../../jupyterhub/Chart.yaml') as f:\n chart = yaml.safe_load(f)\nversion = chart['version'].split('-', 1)[0]\nrelease = chart['version']\n\n# Project specific variables\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#confval-rst_epilog\nrst_epilog = \"\"\"\n.. |hub_version| replace:: {v}\n\"\"\".format(v=chart['appVersion'])\n\n\n# -- General configuration ---------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#general-configuration\n\n# Set the default role so we can use `foo` instead of ``foo``\ndefault_role = 'literal'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.mathjax',\n 'sphinx_copybutton',\n 'myst_parser']\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = []\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The suffix(es) of source filenames.\nsource_suffix = ['.rst', '.md']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n\n# -- Generate the Helm chart configuration reference from a schema file ------\n\n# header\nwith open('reference/reference.txt', 'r') as f:\n header_md = f.readlines()\nheader_md = header_md[1:]\nheader_md = [ln.strip('\\n') for ln in header_md]\n\n# schema\nwith open('../../jupyterhub/schema.yaml', 'r') as f:\n data = yaml.safe_load(f)\ndef parse_schema(d, md=[], depth=0, pre=''):\n \"\"\"\n Generate markdown headers from a passed python dictionary created by\n parsing a schema.yaml file.\n \"\"\"\n if 'properties' in d:\n depth += 1\n # Create markdown headers for each schema level\n for key, val in d['properties'].items():\n md.append('#'*(depth + 1) + ' ' + pre + key)\n md.append('')\n if 'description' in val:\n for ln in val['description'].split('\\n'):\n md.append(ln)\n md.append('')\n\n parse_schema(val, md, depth, pre+'{}.'.format(key))\n depth -= 1\n return md\nschema_md = parse_schema(data)\n\n# reference = header + schema\nreference_md = header_md + schema_md\nwith open('reference/reference.md', 'w') as f:\n f.write('\\n'.join(reference_md))\n\n\n# -- Options for linkcheck builder -------------------------------------------\n# ref: http://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-the-linkcheck-builder\nlinkcheck_ignore = [\n r'(.*)github\\.com(.*)#', # javascript based anchors\n r'(.*)/#%21(.*)/(.*)', # /#!forum/jupyter - encoded anchor edge case\n \"https://your-domain.com\", # example\n \"https://your-domain-name.com\", # example\n \"https://kubernetes.io/docs/tutorials/kubernetes-basics/\", # works\n \"https://cloud.ibm.com/kubernetes/catalog/create\", # works\n]\nlinkcheck_anchors_ignore = [\n \"/#!\",\n \"/#%21\",\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n# ref: http://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-output\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'pydata_sphinx_theme'\nhtml_favicon = '_static/images/logo/favicon.ico'\nhtml_logo = '_static/images/logo/logo.png'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n\n\n# -- Below are options for more esoteric output -------------------------------\n# -----------------------------------------------------------------------------\n\n\n# -- Options for HTML help output ---------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-html-help-output\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ZeroToJupyterhubDoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-latex-output\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files.\nlatex_documents = [\n (\n master_doc, # source start file\n 'ZeroToJupyterhubDoc.tex', # target name\n 'Zero to JupyterHub', # title\n author, # author\n 'manual' # documentclass [howto, manual, or own class]\n ),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-manual-page-output\n\n# One entry per manual page.\nman_pages = [\n (\n master_doc, # source start file\n 'zerotojupyterhub', # name\n 'Zero to JupyterHub', # description\n [author], # authors\n 1, # manual section\n ),\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-texinfo-output\n\n# Grouping the document tree into Texinfo files.\ntexinfo_documents = [\n (\n master_doc, # source start file\n 'ZeroToJupyterhubDoc', # target name\n 'Zero to JupyterHub', # title\n author, # author\n 'ZeroToJupyterhubDoc', # dir menu entry\n 'One line description of project.', # description\n 'Miscellaneous' # category\n ),\n]\n\n\n# -- Options for epub output -------------------------------------------------\n# ref: https://www.sphinx-doc.org/en/latest/usage/configuration.html#options-for-epub-output\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n", "path": "doc/source/conf.py"}]}
3,221
98
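One piece of the zero-to-jupyterhub `conf.py` shown in the entry above that is easy to misread is the `parse_schema` helper, which flattens a schema.yaml-style dict into Markdown headings for the configuration reference. Below is a standalone sketch of the same idea; the example schema dict is invented for illustration, and the original's mutable default argument (`md=[]`) is swapped for `None` so repeated calls don't accumulate output.

```python
# Standalone sketch of the parse_schema() helper from the conf.py above.
# The schema dict is a made-up two-level example, not the real schema.yaml.
def parse_schema(d, md=None, depth=0, pre=''):
    if md is None:          # avoid the shared mutable default of the original
        md = []
    if 'properties' in d:
        depth += 1
        for key, val in d['properties'].items():
            md.append('#' * (depth + 1) + ' ' + pre + key)   # '## hub', '### hub.image', ...
            md.append('')
            if 'description' in val:
                md.extend(val['description'].split('\n'))
                md.append('')
            parse_schema(val, md, depth, pre + '{}.'.format(key))
    return md


schema = {
    'properties': {
        'hub': {
            'description': 'Settings for the hub pod.',
            'properties': {
                'image': {'description': 'Container image to use.'},
            },
        },
    },
}

print('\n'.join(parse_schema(schema)))
# ## hub
#
# Settings for the hub pod.
#
# ### hub.image
#
# Container image to use.
```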
gh_patches_debug_6925
rasdani/github-patches
git_diff
mindsdb__mindsdb-2205
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Result of query with same column names with return different column names Issue exists for any 'internal' tables. If query multiple columns with same name: ``` select 1 as "a", 1 as "a" from information_schema.tables limit 1; ``` then result will be: ``` +------+------+ | a | a_2 | +------+------+ | 1 | 1 | +------+------+ ``` </issue> <code> [start of mindsdb/api/mysql/mysql_proxy/utilities/sql.py] 1 import duckdb 2 import numpy as np 3 from mindsdb_sql import parse_sql 4 from mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy 5 from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender 6 7 from mindsdb.utilities.log import log 8 9 10 def _remove_table_name(root): 11 if isinstance(root, BinaryOperation): 12 _remove_table_name(root.args[0]) 13 _remove_table_name(root.args[1]) 14 elif isinstance(root, Identifier): 15 root.parts = [root.parts[-1]] 16 17 18 def query_df(df, query): 19 """ Perform simple query ('select' from one table, without subqueries and joins) on DataFrame. 20 21 Args: 22 df (pandas.DataFrame): data 23 query (mindsdb_sql.parser.ast.Select | str): select query 24 25 Returns: 26 pandas.DataFrame 27 """ 28 29 if isinstance(query, str): 30 query_ast = parse_sql(query, dialect='mysql') 31 else: 32 query_ast = query 33 34 if isinstance(query_ast, Select) is False or isinstance(query_ast.from_table, Identifier) is False: 35 raise Exception("Only 'SELECT from TABLE' statements supported for internal query") 36 37 query_ast.from_table.parts = ['df_table'] 38 for identifier in query_ast.targets: 39 if isinstance(identifier, Identifier): 40 identifier.parts = [identifier.parts[-1]] 41 if isinstance(query_ast.order_by, list): 42 for orderby in query_ast.order_by: 43 if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier): 44 orderby.field.parts = [orderby.field.parts[-1]] 45 _remove_table_name(query_ast.where) 46 47 render = SqlalchemyRender('postgres') 48 try: 49 query_str = render.get_string(query_ast, with_failback=False) 50 except Exception as e: 51 log.error(f"Exception during query casting to 'postgres' dialect. Query: {str(query)}. Error: {e}") 52 query_str = render.get_string(query_ast, with_failback=True) 53 54 res = duckdb.query_df(df, 'df_table', query_str) 55 result_df = res.df() 56 result_df = result_df.replace({np.nan: None}) 57 return result_df 58 [end of mindsdb/api/mysql/mysql_proxy/utilities/sql.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py --- a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py +++ b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py @@ -54,4 +54,13 @@ res = duckdb.query_df(df, 'df_table', query_str) result_df = res.df() result_df = result_df.replace({np.nan: None}) + + new_column_names = {} + real_column_names = [x[0] for x in res.description()] + for i, duck_column_name in enumerate(result_df.columns): + new_column_names[duck_column_name] = real_column_names[i] + result_df = result_df.rename( + new_column_names, + axis='columns' + ) return result_df
{"golden_diff": "diff --git a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n--- a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n+++ b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py\n@@ -54,4 +54,13 @@\n res = duckdb.query_df(df, 'df_table', query_str)\n result_df = res.df()\n result_df = result_df.replace({np.nan: None})\n+\n+ new_column_names = {}\n+ real_column_names = [x[0] for x in res.description()]\n+ for i, duck_column_name in enumerate(result_df.columns):\n+ new_column_names[duck_column_name] = real_column_names[i]\n+ result_df = result_df.rename(\n+ new_column_names,\n+ axis='columns'\n+ )\n return result_df\n", "issue": "Result of query with same column names with return different column names\nIssue exists for any 'internal' tables. If query multiple columns with same name:\r\n```\r\nselect 1 as \"a\", 1 as \"a\" from information_schema.tables limit 1;\r\n```\r\nthen result will be:\r\n```\r\n+------+------+\r\n| a | a_2 |\r\n+------+------+\r\n| 1 | 1 |\r\n+------+------+\r\n```\n", "before_files": [{"content": "import duckdb\nimport numpy as np\nfrom mindsdb_sql import parse_sql\nfrom mindsdb_sql.parser.ast import Select, Identifier, BinaryOperation, OrderBy\nfrom mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender\n\nfrom mindsdb.utilities.log import log\n\n\ndef _remove_table_name(root):\n if isinstance(root, BinaryOperation):\n _remove_table_name(root.args[0])\n _remove_table_name(root.args[1])\n elif isinstance(root, Identifier):\n root.parts = [root.parts[-1]]\n\n\ndef query_df(df, query):\n \"\"\" Perform simple query ('select' from one table, without subqueries and joins) on DataFrame.\n\n Args:\n df (pandas.DataFrame): data\n query (mindsdb_sql.parser.ast.Select | str): select query\n\n Returns:\n pandas.DataFrame\n \"\"\"\n\n if isinstance(query, str):\n query_ast = parse_sql(query, dialect='mysql')\n else:\n query_ast = query\n\n if isinstance(query_ast, Select) is False or isinstance(query_ast.from_table, Identifier) is False:\n raise Exception(\"Only 'SELECT from TABLE' statements supported for internal query\")\n\n query_ast.from_table.parts = ['df_table']\n for identifier in query_ast.targets:\n if isinstance(identifier, Identifier):\n identifier.parts = [identifier.parts[-1]]\n if isinstance(query_ast.order_by, list):\n for orderby in query_ast.order_by:\n if isinstance(orderby, OrderBy) and isinstance(orderby.field, Identifier):\n orderby.field.parts = [orderby.field.parts[-1]]\n _remove_table_name(query_ast.where)\n\n render = SqlalchemyRender('postgres')\n try:\n query_str = render.get_string(query_ast, with_failback=False)\n except Exception as e:\n log.error(f\"Exception during query casting to 'postgres' dialect. Query: {str(query)}. Error: {e}\")\n query_str = render.get_string(query_ast, with_failback=True)\n\n res = duckdb.query_df(df, 'df_table', query_str)\n result_df = res.df()\n result_df = result_df.replace({np.nan: None})\n return result_df\n", "path": "mindsdb/api/mysql/mysql_proxy/utilities/sql.py"}]}
1,201
189
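The fix in the mindsdb__mindsdb-2205 entry above hinges on one behaviour: DuckDB de-duplicates repeated output column names (`a`, `a_2`, ...), while the aliases the user actually asked for are still recoverable from the result's description. A pandas-only sketch of the rename-back step — with hypothetical column data standing in for a real DuckDB result, since the exact `res.description()` call depends on the duckdb version — looks like this:

```python
import pandas as pd

# Hypothetical stand-ins: the frame DuckDB hands back (de-duplicated names)
# and the aliases the user actually wrote in the SELECT list.
result_df = pd.DataFrame({'a': [1], 'a_2': [1]})
real_column_names = ['a', 'a']   # would come from the result description in practice

# Map each de-duplicated name back to its original alias, position by position.
new_column_names = dict(zip(result_df.columns, real_column_names))
result_df = result_df.rename(new_column_names, axis='columns')

print(list(result_df.columns))   # ['a', 'a'] -- pandas allows duplicate column labels
```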
gh_patches_debug_53374
rasdani/github-patches
git_diff
chainer__chainer-7561
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support ellipsis in `Array::At` and `__getitem__` Depends on #7559 because `py::ellipsis` is supported from v2.3.0. </issue> <code> [start of chainerx/_fallback_workarounds.py] 1 # This file defines workaround implementation for 2 # NumPy-compatibility functions that fall back to NumPy/CuPy functions 3 # for native/cuda devices respecitvely. 4 # The workaround does not support backprop, and also requires external 5 # libraries mentioned above. 6 # Functions defined in this file should be considered to have high priority for 7 # genuine implementations. 8 import numpy 9 10 import chainerx 11 12 13 try: 14 import cupy 15 except Exception: 16 cupy = None 17 18 19 class _DummyContext: 20 def __enter__(self): 21 pass 22 23 def __exit__(self, type, value, traceback): 24 pass 25 26 27 _dummy_context = _DummyContext() 28 29 30 def _to_numpy(array): 31 assert isinstance(array, chainerx.ndarray) 32 return chainerx.to_numpy(array, copy=False) 33 34 35 def _from_numpy(array): 36 assert isinstance(array, numpy.ndarray) 37 return chainerx.array(array, copy=False) 38 39 40 def _to_cupy(array): 41 assert cupy is not None 42 # Convert to cupy.ndarray on the same device as source array 43 return chainerx._to_cupy(array) 44 45 46 def _from_cupy(array): 47 assert cupy is not None 48 assert isinstance(array, cupy.ndarray) 49 device = chainerx.get_device('cuda', array.device.id) 50 return chainerx._core._fromrawpointer( 51 array.data.mem.ptr, 52 array.shape, 53 array.dtype, 54 array.strides, 55 device, 56 array.data.ptr - array.data.mem.ptr, 57 array) 58 59 60 def _from_chx(array, check_backprop=True): 61 # Converts chainerx.ndarray to numpy/cupy.ndarray. 62 # Objects with other types are kept intact. 63 # Returns a pair: (xp, cupy device or dummy context, numpy/cupy.ndarray). 64 if not isinstance(array, chainerx.ndarray): 65 if (isinstance(array, numpy.ndarray) 66 or (cupy and isinstance(array, cupy.ndarray))): 67 raise TypeError( 68 'ChainerX function fallback using NumPy/CuPy arrays ' 69 'is not supported.') 70 # _from_chx is also called for slice and tuple objects 71 # Used to index a chx array 72 return None, _dummy_context, array 73 if check_backprop and array.is_backprop_required(): 74 raise RuntimeError( 75 'ChainerX function fallback using NumPy/CuPy is not ' 76 'supported for arrays that are connected to a graph.') 77 backend_name = array.device.backend.name 78 if backend_name == 'native': 79 return numpy, _dummy_context, _to_numpy(array) 80 if backend_name == 'cuda': 81 if cupy is None: 82 raise RuntimeError( 83 'ChainerX fallback implementation for cuda backend requires ' 84 'cupy to be installed.') 85 array_cupy = _to_cupy(array) 86 return cupy, array_cupy.device, array_cupy 87 raise RuntimeError( 88 'ChainerX fallback implementation only supports native or cuda ' 89 'backends.') 90 91 92 def _to_chx(array): 93 # Converts numpy/cupy.ndarray to chainerx.ndarray. 94 # Objects with other types are kept intact. 
95 if isinstance(array, numpy.ndarray): 96 return _from_numpy(array) 97 elif cupy is not None and isinstance(array, cupy.ndarray): 98 return _from_cupy(array) 99 return array 100 101 102 def _populate_module_functions(): 103 104 def _fix(arr): 105 xp, dev, arr = _from_chx(arr) 106 with dev: 107 ret = xp.fix(arr) 108 ret = xp.asarray(ret) 109 return _to_chx(ret) 110 111 chainerx.fix = _fix 112 113 114 def _populate_ndarray(): 115 ndarray = chainerx.ndarray 116 117 # __getitem__ with advanced indexing 118 old_getitem = ndarray.__getitem__ 119 120 def __getitem__(arr, key): 121 try: 122 return old_getitem(arr, key) 123 except (IndexError, chainerx.DimensionError): 124 pass 125 126 is_backprop_required = arr.is_backprop_required() 127 128 xp, dev, arr = _from_chx(arr, check_backprop=False) 129 # The elements used for indexing the array might be 130 # also ChainerX arrays. _from_chx ignores 131 # other types and return them as-is 132 if isinstance(key, tuple): 133 key = tuple([_from_chx(k, check_backprop=False)[2] for k in key]) 134 else: 135 _, _, key = _from_chx(key, check_backprop=False) 136 137 with dev: 138 ret = arr[key] 139 140 # Doing this check after the fallback __getitem__ because the error 141 # which caused the fallback might not be due to advanced indexing. 142 # In such case the fallback __getitem__ should also raise the error. 143 144 if is_backprop_required: 145 raise RuntimeError( 146 'ChainerX getitem fallback for advanced indexing is not ' 147 'supported for arrays that are connected to a graph.') 148 149 return _to_chx(ret) 150 151 # __setitem__ with advanced indexing 152 def __setitem__(self, key, value): 153 if self.is_backprop_required(): 154 raise RuntimeError( 155 'ChainerX setitem fallback for advanced indexing is not ' 156 'supported for arrays that are connected to a graph.') 157 158 xp, dev, self = _from_chx(self) 159 if isinstance(key, tuple): 160 key = tuple([_from_chx(k)[2] for k in key]) 161 else: 162 _, _, key = _from_chx(key) 163 _, _, value = _from_chx(value) 164 165 with dev: 166 self[key] = value 167 168 ndarray.__setitem__ = __setitem__ 169 ndarray.__getitem__ = __getitem__ 170 171 def tolist(arr): 172 _, dev, arr = _from_chx(arr) 173 with dev: 174 ret = arr.tolist() 175 return ret 176 177 ndarray.tolist = tolist 178 179 180 def populate(): 181 _populate_module_functions() 182 _populate_ndarray() 183 [end of chainerx/_fallback_workarounds.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainerx/_fallback_workarounds.py b/chainerx/_fallback_workarounds.py --- a/chainerx/_fallback_workarounds.py +++ b/chainerx/_fallback_workarounds.py @@ -118,10 +118,8 @@ old_getitem = ndarray.__getitem__ def __getitem__(arr, key): - try: + if not isinstance(key, chainerx.ndarray): return old_getitem(arr, key) - except (IndexError, chainerx.DimensionError): - pass is_backprop_required = arr.is_backprop_required()
{"golden_diff": "diff --git a/chainerx/_fallback_workarounds.py b/chainerx/_fallback_workarounds.py\n--- a/chainerx/_fallback_workarounds.py\n+++ b/chainerx/_fallback_workarounds.py\n@@ -118,10 +118,8 @@\n old_getitem = ndarray.__getitem__\n \n def __getitem__(arr, key):\n- try:\n+ if not isinstance(key, chainerx.ndarray):\n return old_getitem(arr, key)\n- except (IndexError, chainerx.DimensionError):\n- pass\n \n is_backprop_required = arr.is_backprop_required()\n", "issue": "Support ellipsis in `Array::At` and `__getitem__`\nDepends on #7559 because `py::ellipsis` is supported from v2.3.0.\n", "before_files": [{"content": "# This file defines workaround implementation for\n# NumPy-compatibility functions that fall back to NumPy/CuPy functions\n# for native/cuda devices respecitvely.\n# The workaround does not support backprop, and also requires external\n# libraries mentioned above.\n# Functions defined in this file should be considered to have high priority for\n# genuine implementations.\nimport numpy\n\nimport chainerx\n\n\ntry:\n import cupy\nexcept Exception:\n cupy = None\n\n\nclass _DummyContext:\n def __enter__(self):\n pass\n\n def __exit__(self, type, value, traceback):\n pass\n\n\n_dummy_context = _DummyContext()\n\n\ndef _to_numpy(array):\n assert isinstance(array, chainerx.ndarray)\n return chainerx.to_numpy(array, copy=False)\n\n\ndef _from_numpy(array):\n assert isinstance(array, numpy.ndarray)\n return chainerx.array(array, copy=False)\n\n\ndef _to_cupy(array):\n assert cupy is not None\n # Convert to cupy.ndarray on the same device as source array\n return chainerx._to_cupy(array)\n\n\ndef _from_cupy(array):\n assert cupy is not None\n assert isinstance(array, cupy.ndarray)\n device = chainerx.get_device('cuda', array.device.id)\n return chainerx._core._fromrawpointer(\n array.data.mem.ptr,\n array.shape,\n array.dtype,\n array.strides,\n device,\n array.data.ptr - array.data.mem.ptr,\n array)\n\n\ndef _from_chx(array, check_backprop=True):\n # Converts chainerx.ndarray to numpy/cupy.ndarray.\n # Objects with other types are kept intact.\n # Returns a pair: (xp, cupy device or dummy context, numpy/cupy.ndarray).\n if not isinstance(array, chainerx.ndarray):\n if (isinstance(array, numpy.ndarray)\n or (cupy and isinstance(array, cupy.ndarray))):\n raise TypeError(\n 'ChainerX function fallback using NumPy/CuPy arrays '\n 'is not supported.')\n # _from_chx is also called for slice and tuple objects\n # Used to index a chx array\n return None, _dummy_context, array\n if check_backprop and array.is_backprop_required():\n raise RuntimeError(\n 'ChainerX function fallback using NumPy/CuPy is not '\n 'supported for arrays that are connected to a graph.')\n backend_name = array.device.backend.name\n if backend_name == 'native':\n return numpy, _dummy_context, _to_numpy(array)\n if backend_name == 'cuda':\n if cupy is None:\n raise RuntimeError(\n 'ChainerX fallback implementation for cuda backend requires '\n 'cupy to be installed.')\n array_cupy = _to_cupy(array)\n return cupy, array_cupy.device, array_cupy\n raise RuntimeError(\n 'ChainerX fallback implementation only supports native or cuda '\n 'backends.')\n\n\ndef _to_chx(array):\n # Converts numpy/cupy.ndarray to chainerx.ndarray.\n # Objects with other types are kept intact.\n if isinstance(array, numpy.ndarray):\n return _from_numpy(array)\n elif cupy is not None and isinstance(array, cupy.ndarray):\n return _from_cupy(array)\n return array\n\n\ndef _populate_module_functions():\n\n def _fix(arr):\n xp, dev, arr = 
_from_chx(arr)\n with dev:\n ret = xp.fix(arr)\n ret = xp.asarray(ret)\n return _to_chx(ret)\n\n chainerx.fix = _fix\n\n\ndef _populate_ndarray():\n ndarray = chainerx.ndarray\n\n # __getitem__ with advanced indexing\n old_getitem = ndarray.__getitem__\n\n def __getitem__(arr, key):\n try:\n return old_getitem(arr, key)\n except (IndexError, chainerx.DimensionError):\n pass\n\n is_backprop_required = arr.is_backprop_required()\n\n xp, dev, arr = _from_chx(arr, check_backprop=False)\n # The elements used for indexing the array might be\n # also ChainerX arrays. _from_chx ignores\n # other types and return them as-is\n if isinstance(key, tuple):\n key = tuple([_from_chx(k, check_backprop=False)[2] for k in key])\n else:\n _, _, key = _from_chx(key, check_backprop=False)\n\n with dev:\n ret = arr[key]\n\n # Doing this check after the fallback __getitem__ because the error\n # which caused the fallback might not be due to advanced indexing.\n # In such case the fallback __getitem__ should also raise the error.\n\n if is_backprop_required:\n raise RuntimeError(\n 'ChainerX getitem fallback for advanced indexing is not '\n 'supported for arrays that are connected to a graph.')\n\n return _to_chx(ret)\n\n # __setitem__ with advanced indexing\n def __setitem__(self, key, value):\n if self.is_backprop_required():\n raise RuntimeError(\n 'ChainerX setitem fallback for advanced indexing is not '\n 'supported for arrays that are connected to a graph.')\n\n xp, dev, self = _from_chx(self)\n if isinstance(key, tuple):\n key = tuple([_from_chx(k)[2] for k in key])\n else:\n _, _, key = _from_chx(key)\n _, _, value = _from_chx(value)\n\n with dev:\n self[key] = value\n\n ndarray.__setitem__ = __setitem__\n ndarray.__getitem__ = __getitem__\n\n def tolist(arr):\n _, dev, arr = _from_chx(arr)\n with dev:\n ret = arr.tolist()\n return ret\n\n ndarray.tolist = tolist\n\n\ndef populate():\n _populate_module_functions()\n _populate_ndarray()\n", "path": "chainerx/_fallback_workarounds.py"}]}
2,315
136
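The chainer__chainer-7561 entry above replaces a try/except fallback in `__getitem__` with an explicit type check on the key, so ordinary basic indexing never detours through NumPy/CuPy. The toy class below sketches that dispatch pattern with a stand-in array type — it is not the real ChainerX API, just an illustration of "fall back only when the key is itself an array".

```python
import numpy as np


class Array:
    """Toy stand-in: indexing by another Array takes a fallback path."""

    def __init__(self, data):
        self._data = np.asarray(data)

    def _native_getitem(self, key):
        # Fast path, playing the role of the original old_getitem.
        return Array(self._data[key])

    def _fallback_getitem(self, key):
        # Slow path, playing the role of the NumPy/CuPy round trip.
        return Array(self._data[key._data])

    def __getitem__(self, key):
        # After the fix: dispatch on the key's type up front instead of
        # catching IndexError/DimensionError raised by the fast path.
        if not isinstance(key, Array):
            return self._native_getitem(key)
        return self._fallback_getitem(key)


a = Array([10, 20, 30, 40])
print(a[1:3]._data)            # fast path: [20 30]
print(a[Array([0, 3])]._data)  # fallback path: [10 40]
```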
gh_patches_debug_39133
rasdani/github-patches
git_diff
e-valuation__EvaP-1110
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix results view for courses where results can't be published The results page for a course where the results can't be published because it didn't get enough votes is not displayed correctly: - Contributors can't see any non-text answers (that's correct) - but not all contributor cards are collapsed accordingly. If at least one vote exists, the respective contributor card is open, the warning text is not shown and no useful information is shown inside the card (see screenshot 1). The card should be closed in this case. - Staff users initially see the "public" view and can change the view to `Myself`. On results pages of courses that didn't get enough votes, the `Public` button is missing (that was by design, because a public view does not exist for these courses) - now it's confusing because the initial view is something that's not accessible via the buttons. Screenshot 1 (Contributor): ![cardopen](https://user-images.githubusercontent.com/1781719/34488419-c82cd8d0-efd8-11e7-8f09-0c4ff0f06739.PNG) Screenshot 2 (Staff): ![viewbtn](https://user-images.githubusercontent.com/1781719/34488426-cd684118-efd8-11e7-80ba-c05e1ff34f93.PNG) </issue> <code> [start of evap/results/views.py] 1 from collections import OrderedDict, namedtuple 2 3 from django.core.exceptions import PermissionDenied 4 from django.shortcuts import get_object_or_404, render 5 from django.contrib.auth.decorators import login_required 6 7 from evap.evaluation.models import Semester, Degree, Contribution 8 from evap.evaluation.auth import internal_required 9 from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, HeadingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY 10 11 12 @internal_required 13 def index(request): 14 semesters = Semester.get_all_with_published_courses() 15 16 return render(request, "results_index.html", dict(semesters=semesters)) 17 18 19 @internal_required 20 def semester_detail(request, semester_id): 21 semester = get_object_or_404(Semester, id=semester_id) 22 if request.user.is_reviewer: 23 courses = list(semester.course_set.filter(state__in=["in_evaluation", "evaluated", "reviewed", "published"]).prefetch_related("degrees")) 24 else: 25 courses = list(semester.course_set.filter(state="published").prefetch_related("degrees")) 26 27 courses = [course for course in courses if course.can_user_see_course(request.user)] 28 29 # Annotate each course object with its grades. 
30 for course in courses: 31 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course) 32 33 CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results')) 34 35 courses_by_degree = OrderedDict() 36 for degree in Degree.objects.all(): 37 courses_by_degree[degree] = CourseTuple([], []) 38 for course in courses: 39 if course.is_single_result: 40 for degree in course.degrees.all(): 41 section = calculate_results(course)[0] 42 result = section.results[0] 43 courses_by_degree[degree].single_results.append((course, result)) 44 else: 45 for degree in course.degrees.all(): 46 courses_by_degree[degree].courses.append(course) 47 48 template_data = dict(semester=semester, courses_by_degree=courses_by_degree) 49 return render(request, "results_semester_detail.html", template_data) 50 51 52 @login_required 53 def course_detail(request, semester_id, course_id): 54 semester = get_object_or_404(Semester, id=semester_id) 55 course = get_object_or_404(semester.course_set, id=course_id, semester=semester) 56 57 if not course.can_user_see_results(request.user): 58 raise PermissionDenied 59 60 sections = calculate_results(course) 61 62 if request.user.is_reviewer: 63 public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view. 64 else: 65 public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view. 66 67 represented_users = list(request.user.represented_users.all()) 68 represented_users.append(request.user) 69 70 # filter text answers 71 for section in sections: 72 results = [] 73 for result in section.results: 74 if isinstance(result, TextResult): 75 answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, represented_users, answer, public_view)] 76 if answers: 77 results.append(TextResult(question=result.question, answers=answers)) 78 else: 79 results.append(result) 80 section.results[:] = results 81 82 # filter empty headings 83 for section in sections: 84 filtered_results = [] 85 for index in range(len(section.results)): 86 result = section.results[index] 87 # filter out if there are no more questions or the next question is also a heading question 88 if isinstance(result, HeadingResult): 89 if index == len(section.results) - 1 or isinstance(section.results[index + 1], HeadingResult): 90 continue 91 filtered_results.append(result) 92 section.results[:] = filtered_results 93 94 # remove empty sections 95 sections = [section for section in sections if section.results] 96 97 # group by contributor 98 course_sections = [] 99 contributor_sections = OrderedDict() 100 for section in sections: 101 if not section.results: 102 continue 103 if section.contributor is None: 104 course_sections.append(section) 105 else: 106 contributor_sections.setdefault(section.contributor, 107 {'total_votes': 0, 'sections': []})['sections'].append(section) 108 109 # Sum up all Sections for this contributor. 110 # If section is not a RatingResult: 111 # Add 1 as we assume it is a TextResult or something similar that should be displayed. 112 contributor_sections[section.contributor]['total_votes'] +=\ 113 sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results]) 114 115 # Show a warning if course is still in evaluation (for reviewer preview). 116 evaluation_warning = course.state != 'published' 117 118 # Results for a course might not be visible because there are not enough answers 119 # but it can still be "published" e.g. 
to show the comment results to contributors. 120 # Users who can open the results page see a warning message in this case. 121 sufficient_votes_warning = not course.can_publish_grades 122 123 show_grades = request.user.is_reviewer or course.can_publish_grades 124 125 course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course) 126 127 template_data = dict( 128 course=course, 129 course_sections=course_sections, 130 contributor_sections=contributor_sections, 131 evaluation_warning=evaluation_warning, 132 sufficient_votes_warning=sufficient_votes_warning, 133 show_grades=show_grades, 134 reviewer=request.user.is_reviewer, 135 contributor=course.is_user_contributor_or_delegate(request.user), 136 can_download_grades=request.user.can_download_grades, 137 public_view=public_view) 138 return render(request, "results_course_detail.html", template_data) 139 140 141 def user_can_see_text_answer(user, represented_users, text_answer, public_view=False): 142 if public_view: 143 return False 144 if text_answer.state not in COMMENT_STATES_REQUIRED_FOR_VISIBILITY: 145 return False 146 if user.is_reviewer: 147 return True 148 149 contributor = text_answer.contribution.contributor 150 151 if text_answer.is_private: 152 return contributor == user 153 154 if text_answer.is_published: 155 if text_answer.contribution.responsible: 156 return contributor == user or user in contributor.delegates.all() 157 158 if contributor in represented_users: 159 return True 160 if text_answer.contribution.course.contributions.filter( 161 contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists(): 162 return True 163 if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter( 164 contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists(): 165 return True 166 167 return False 168 [end of evap/results/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evap/results/views.py b/evap/results/views.py --- a/evap/results/views.py +++ b/evap/results/views.py @@ -6,7 +6,8 @@ from evap.evaluation.models import Semester, Degree, Contribution from evap.evaluation.auth import internal_required -from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, HeadingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY +from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, \ + HeadingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY, YesNoResult @internal_required @@ -64,9 +65,15 @@ else: public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view. + # If grades are not published, there is no public view + if not course.can_publish_grades: + public_view = False + represented_users = list(request.user.represented_users.all()) represented_users.append(request.user) + show_grades = request.user.is_reviewer or course.can_publish_grades + # filter text answers for section in sections: results = [] @@ -106,11 +113,13 @@ contributor_sections.setdefault(section.contributor, {'total_votes': 0, 'sections': []})['sections'].append(section) - # Sum up all Sections for this contributor. - # If section is not a RatingResult: - # Add 1 as we assume it is a TextResult or something similar that should be displayed. - contributor_sections[section.contributor]['total_votes'] +=\ - sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results]) + for result in section.results: + if isinstance(result, TextResult): + contributor_sections[section.contributor]['total_votes'] += 1 + elif isinstance(result, RatingResult) or isinstance(result, YesNoResult): + # Only count rating results if we show the grades. + if show_grades: + contributor_sections[section.contributor]['total_votes'] += result.total_count # Show a warning if course is still in evaluation (for reviewer preview). evaluation_warning = course.state != 'published' @@ -120,8 +129,6 @@ # Users who can open the results page see a warning message in this case. sufficient_votes_warning = not course.can_publish_grades - show_grades = request.user.is_reviewer or course.can_publish_grades - course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course) template_data = dict(
{"golden_diff": "diff --git a/evap/results/views.py b/evap/results/views.py\n--- a/evap/results/views.py\n+++ b/evap/results/views.py\n@@ -6,7 +6,8 @@\n \n from evap.evaluation.models import Semester, Degree, Contribution\n from evap.evaluation.auth import internal_required\n-from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, HeadingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY\n+from evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, \\\n+ HeadingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY, YesNoResult\n \n \n @internal_required\n@@ -64,9 +65,15 @@\n else:\n public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n \n+ # If grades are not published, there is no public view\n+ if not course.can_publish_grades:\n+ public_view = False\n+\n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n \n+ show_grades = request.user.is_reviewer or course.can_publish_grades\n+\n # filter text answers\n for section in sections:\n results = []\n@@ -106,11 +113,13 @@\n contributor_sections.setdefault(section.contributor,\n {'total_votes': 0, 'sections': []})['sections'].append(section)\n \n- # Sum up all Sections for this contributor.\n- # If section is not a RatingResult:\n- # Add 1 as we assume it is a TextResult or something similar that should be displayed.\n- contributor_sections[section.contributor]['total_votes'] +=\\\n- sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])\n+ for result in section.results:\n+ if isinstance(result, TextResult):\n+ contributor_sections[section.contributor]['total_votes'] += 1\n+ elif isinstance(result, RatingResult) or isinstance(result, YesNoResult):\n+ # Only count rating results if we show the grades.\n+ if show_grades:\n+ contributor_sections[section.contributor]['total_votes'] += result.total_count\n \n # Show a warning if course is still in evaluation (for reviewer preview).\n evaluation_warning = course.state != 'published'\n@@ -120,8 +129,6 @@\n # Users who can open the results page see a warning message in this case.\n sufficient_votes_warning = not course.can_publish_grades\n \n- show_grades = request.user.is_reviewer or course.can_publish_grades\n-\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n \n template_data = dict(\n", "issue": "Fix results view for courses where results can't be published\nThe results page for a course where the results can't be published because it didn't get enough votes is not displayed correctly:\r\n- Contributors can't see any non-text answers (that's correct) - but not all contributor cards are collapsed accordingly. If at least one vote exists, the respective contributor card is open, the warning text is not shown and no useful information is shown inside the card (see screenshot 1). The card should be closed in this case.\r\n- Staff users initially see the \"public\" view and can change the view to `Myself`. 
On results pages of courses that didn't get enough votes, the `Public` button is missing (that was by design, because a public view does not exist for these courses) - now it's confusing because the initial view is something that's not accessible via the buttons.\r\n\r\nScreenshot 1 (Contributor):\r\n![cardopen](https://user-images.githubusercontent.com/1781719/34488419-c82cd8d0-efd8-11e7-8f09-0c4ff0f06739.PNG)\r\n\r\nScreenshot 2 (Staff):\r\n![viewbtn](https://user-images.githubusercontent.com/1781719/34488426-cd684118-efd8-11e7-80ba-c05e1ff34f93.PNG)\r\n\n", "before_files": [{"content": "from collections import OrderedDict, namedtuple\n\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import login_required\n\nfrom evap.evaluation.models import Semester, Degree, Contribution\nfrom evap.evaluation.auth import internal_required\nfrom evap.results.tools import calculate_results, calculate_average_grades_and_deviation, TextResult, RatingResult, HeadingResult, COMMENT_STATES_REQUIRED_FOR_VISIBILITY\n\n\n@internal_required\ndef index(request):\n semesters = Semester.get_all_with_published_courses()\n\n return render(request, \"results_index.html\", dict(semesters=semesters))\n\n\n@internal_required\ndef semester_detail(request, semester_id):\n semester = get_object_or_404(Semester, id=semester_id)\n if request.user.is_reviewer:\n courses = list(semester.course_set.filter(state__in=[\"in_evaluation\", \"evaluated\", \"reviewed\", \"published\"]).prefetch_related(\"degrees\"))\n else:\n courses = list(semester.course_set.filter(state=\"published\").prefetch_related(\"degrees\"))\n\n courses = [course for course in courses if course.can_user_see_course(request.user)]\n\n # Annotate each course object with its grades.\n for course in courses:\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n CourseTuple = namedtuple('CourseTuple', ('courses', 'single_results'))\n\n courses_by_degree = OrderedDict()\n for degree in Degree.objects.all():\n courses_by_degree[degree] = CourseTuple([], [])\n for course in courses:\n if course.is_single_result:\n for degree in course.degrees.all():\n section = calculate_results(course)[0]\n result = section.results[0]\n courses_by_degree[degree].single_results.append((course, result))\n else:\n for degree in course.degrees.all():\n courses_by_degree[degree].courses.append(course)\n\n template_data = dict(semester=semester, courses_by_degree=courses_by_degree)\n return render(request, \"results_semester_detail.html\", template_data)\n\n\n@login_required\ndef course_detail(request, semester_id, course_id):\n semester = get_object_or_404(Semester, id=semester_id)\n course = get_object_or_404(semester.course_set, id=course_id, semester=semester)\n\n if not course.can_user_see_results(request.user):\n raise PermissionDenied\n\n sections = calculate_results(course)\n\n if request.user.is_reviewer:\n public_view = request.GET.get('public_view') != 'false' # if parameter is not given, show public view.\n else:\n public_view = request.GET.get('public_view') == 'true' # if parameter is not given, show own view.\n\n represented_users = list(request.user.represented_users.all())\n represented_users.append(request.user)\n\n # filter text answers\n for section in sections:\n results = []\n for result in section.results:\n if isinstance(result, TextResult):\n answers = [answer for answer in result.answers if user_can_see_text_answer(request.user, 
represented_users, answer, public_view)]\n if answers:\n results.append(TextResult(question=result.question, answers=answers))\n else:\n results.append(result)\n section.results[:] = results\n\n # filter empty headings\n for section in sections:\n filtered_results = []\n for index in range(len(section.results)):\n result = section.results[index]\n # filter out if there are no more questions or the next question is also a heading question\n if isinstance(result, HeadingResult):\n if index == len(section.results) - 1 or isinstance(section.results[index + 1], HeadingResult):\n continue\n filtered_results.append(result)\n section.results[:] = filtered_results\n\n # remove empty sections\n sections = [section for section in sections if section.results]\n\n # group by contributor\n course_sections = []\n contributor_sections = OrderedDict()\n for section in sections:\n if not section.results:\n continue\n if section.contributor is None:\n course_sections.append(section)\n else:\n contributor_sections.setdefault(section.contributor,\n {'total_votes': 0, 'sections': []})['sections'].append(section)\n\n # Sum up all Sections for this contributor.\n # If section is not a RatingResult:\n # Add 1 as we assume it is a TextResult or something similar that should be displayed.\n contributor_sections[section.contributor]['total_votes'] +=\\\n sum([s.total_count if isinstance(s, RatingResult) else 1 for s in section.results])\n\n # Show a warning if course is still in evaluation (for reviewer preview).\n evaluation_warning = course.state != 'published'\n\n # Results for a course might not be visible because there are not enough answers\n # but it can still be \"published\" e.g. to show the comment results to contributors.\n # Users who can open the results page see a warning message in this case.\n sufficient_votes_warning = not course.can_publish_grades\n\n show_grades = request.user.is_reviewer or course.can_publish_grades\n\n course.avg_grade, course.avg_deviation = calculate_average_grades_and_deviation(course)\n\n template_data = dict(\n course=course,\n course_sections=course_sections,\n contributor_sections=contributor_sections,\n evaluation_warning=evaluation_warning,\n sufficient_votes_warning=sufficient_votes_warning,\n show_grades=show_grades,\n reviewer=request.user.is_reviewer,\n contributor=course.is_user_contributor_or_delegate(request.user),\n can_download_grades=request.user.can_download_grades,\n public_view=public_view)\n return render(request, \"results_course_detail.html\", template_data)\n\n\ndef user_can_see_text_answer(user, represented_users, text_answer, public_view=False):\n if public_view:\n return False\n if text_answer.state not in COMMENT_STATES_REQUIRED_FOR_VISIBILITY:\n return False\n if user.is_reviewer:\n return True\n\n contributor = text_answer.contribution.contributor\n\n if text_answer.is_private:\n return contributor == user\n\n if text_answer.is_published:\n if text_answer.contribution.responsible:\n return contributor == user or user in contributor.delegates.all()\n\n if contributor in represented_users:\n return True\n if text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.ALL_COMMENTS).exists():\n return True\n if text_answer.contribution.is_general and text_answer.contribution.course.contributions.filter(\n contributor__in=represented_users, comment_visibility=Contribution.COURSE_COMMENTS).exists():\n return True\n\n return False\n", "path": "evap/results/views.py"}]}
2,678
605
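In the e-valuation__EvaP-1110 entry above, the behavioural core of the fix is the per-contributor vote count: text answers always count, but rating answers should only count when grades may actually be shown, so contributor cards on under-voted courses collapse. The snippet below is a framework-free sketch of that rule with mocked result classes — it deliberately ignores EvaP's `YesNoResult` and the real model fields.

```python
from dataclasses import dataclass


@dataclass
class TextResult:
    answers: list


@dataclass
class RatingResult:
    total_count: int


def contributor_total_votes(results, show_grades):
    """Text results always count as 1; rating counts only when grades are shown."""
    total = 0
    for result in results:
        if isinstance(result, TextResult):
            total += 1
        elif isinstance(result, RatingResult) and show_grades:
            total += result.total_count
    return total


rating_only = [RatingResult(total_count=3)]
with_text = [RatingResult(total_count=3), TextResult(answers=['ok'])]

print(contributor_total_votes(rating_only, show_grades=True))   # 3
print(contributor_total_votes(rating_only, show_grades=False))  # 0 -> card stays collapsed
print(contributor_total_votes(with_text, show_grades=False))    # 1 -> only the visible comment counts
```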
gh_patches_debug_1190
rasdani/github-patches
git_diff
boto__botocore-1117
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support Python 3.6 Python 3.6 got released, and some distro (like Fedora) are swithcing to it. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import botocore 3 import sys 4 5 from setuptools import setup, find_packages 6 7 8 requires = ['jmespath>=0.7.1,<1.0.0', 9 'python-dateutil>=2.1,<3.0.0', 10 'docutils>=0.10'] 11 12 13 if sys.version_info[:2] == (2, 6): 14 # For python2.6 we have a few other dependencies. 15 # First we need an ordered dictionary so we use the 16 # 2.6 backport. 17 requires.append('ordereddict==1.1') 18 # Then we need simplejson. This is because we need 19 # a json version that allows us to specify we want to 20 # use an ordereddict instead of a normal dict for the 21 # JSON objects. The 2.7 json module has this. For 2.6 22 # we need simplejson. 23 requires.append('simplejson==3.3.0') 24 25 26 setup( 27 name='botocore', 28 version=botocore.__version__, 29 description='Low-level, data-driven core of boto 3.', 30 long_description=open('README.rst').read(), 31 author='Amazon Web Services', 32 url='https://github.com/boto/botocore', 33 scripts=[], 34 packages=find_packages(exclude=['tests*']), 35 package_data={'botocore': ['data/*.json', 'data/*/*.json'], 36 'botocore.vendored.requests': ['*.pem']}, 37 include_package_data=True, 38 install_requires=requires, 39 extras_require={ 40 ':python_version=="2.6"': [ 41 'ordereddict==1.1', 42 'simplejson==3.3.0', 43 ] 44 }, 45 license="Apache License 2.0", 46 classifiers=( 47 'Development Status :: 5 - Production/Stable', 48 'Intended Audience :: Developers', 49 'Intended Audience :: System Administrators', 50 'Natural Language :: English', 51 'License :: OSI Approved :: Apache Software License', 52 'Programming Language :: Python', 53 'Programming Language :: Python :: 2', 54 'Programming Language :: Python :: 2.6', 55 'Programming Language :: Python :: 2.7', 56 'Programming Language :: Python :: 3', 57 'Programming Language :: Python :: 3.3', 58 'Programming Language :: Python :: 3.4', 59 'Programming Language :: Python :: 3.5', 60 ), 61 ) 62 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -57,5 +57,6 @@ 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', ), )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,5 +57,6 @@\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n+ 'Programming Language :: Python :: 3.6',\n ),\n )\n", "issue": "Support Python 3.6\nPython 3.6 got released, and some distro (like Fedora) are swithcing to it.\n", "before_files": [{"content": "#!/usr/bin/env python\nimport botocore\nimport sys\n\nfrom setuptools import setup, find_packages\n\n\nrequires = ['jmespath>=0.7.1,<1.0.0',\n 'python-dateutil>=2.1,<3.0.0',\n 'docutils>=0.10']\n\n\nif sys.version_info[:2] == (2, 6):\n # For python2.6 we have a few other dependencies.\n # First we need an ordered dictionary so we use the\n # 2.6 backport.\n requires.append('ordereddict==1.1')\n # Then we need simplejson. This is because we need\n # a json version that allows us to specify we want to\n # use an ordereddict instead of a normal dict for the\n # JSON objects. The 2.7 json module has this. For 2.6\n # we need simplejson.\n requires.append('simplejson==3.3.0')\n\n\nsetup(\n name='botocore',\n version=botocore.__version__,\n description='Low-level, data-driven core of boto 3.',\n long_description=open('README.rst').read(),\n author='Amazon Web Services',\n url='https://github.com/boto/botocore',\n scripts=[],\n packages=find_packages(exclude=['tests*']),\n package_data={'botocore': ['data/*.json', 'data/*/*.json'],\n 'botocore.vendored.requests': ['*.pem']},\n include_package_data=True,\n install_requires=requires,\n extras_require={\n ':python_version==\"2.6\"': [\n 'ordereddict==1.1',\n 'simplejson==3.3.0',\n ]\n },\n license=\"Apache License 2.0\",\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ),\n)\n", "path": "setup.py"}]}
1,210
84
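The boto__botocore-1117 entry above only adds a Python 3.6 trove classifier, but its setup.py also shows the old pattern of branching on `sys.version_info` (plus `extras_require`) for Python 2.6-only dependencies. For comparison, the same conditional dependencies can be expressed declaratively with PEP 508 environment markers — the snippet below is illustrative only, not botocore's actual packaging:

```python
from setuptools import setup

# Illustrative only: environment markers let pip pick the right requirements
# per interpreter instead of evaluating sys.version_info inside setup.py.
setup(
    name='example-package',
    version='0.0.1',
    install_requires=[
        'jmespath>=0.7.1,<1.0.0',
        'python-dateutil>=2.1,<3.0.0',
        'ordereddict==1.1; python_version == "2.6"',
        'simplejson==3.3.0; python_version == "2.6"',
    ],
)
```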
gh_patches_debug_8073
rasdani/github-patches
git_diff
ivy-llc__ivy-26775
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ifft2 ifft2 </issue> <code> [start of ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py] 1 import ivy 2 from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back 3 from ivy.func_wrapper import with_unsupported_dtypes 4 5 6 # --- Helpers --- # 7 # --------------- # 8 9 10 def _swap_direction(norm): 11 try: 12 return _SWAP_DIRECTION_MAP[norm] 13 except KeyError: 14 raise ValueError( 15 f'Invalid norm value {norm}; should be "backward", "ortho" or "forward".' 16 ) from None 17 18 19 # --- Main --- # 20 # ------------ # 21 22 23 @to_ivy_arrays_and_back 24 def fft(a, n=None, axis=-1, norm=None): 25 return ivy.fft(ivy.astype(a, ivy.complex128), axis, norm=norm, n=n) 26 27 28 @with_unsupported_dtypes({"1.26.0 and below": ("int",)}, "numpy") 29 @to_ivy_arrays_and_back 30 def fftfreq(n, d=1.0): 31 if not isinstance( 32 n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64)) 33 ): 34 raise ValueError("n should be an integer") 35 36 N = (n - 1) // 2 + 1 37 val = 1.0 / (n * d) 38 results = ivy.empty(tuple([n]), dtype=int) 39 40 p1 = ivy.arange(0, N, dtype=int) 41 results[:N] = p1 42 p2 = ivy.arange(-(n // 2), 0, dtype=int) 43 results[N:] = p2 44 45 return results * val 46 47 48 @to_ivy_arrays_and_back 49 @with_unsupported_dtypes({"1.26.0 and below": ("float16",)}, "numpy") 50 def fftshift(x, axes=None): 51 x = ivy.asarray(x) 52 53 if axes is None: 54 axes = tuple(range(x.ndim)) 55 shift = [(dim // 2) for dim in x.shape] 56 elif isinstance( 57 axes, 58 (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)), 59 ): 60 shift = x.shape[axes] // 2 61 else: 62 shift = [(x.shape[ax] // 2) for ax in axes] 63 64 roll = ivy.roll(x, shift, axis=axes) 65 66 return roll 67 68 69 @to_ivy_arrays_and_back 70 def ifft(a, n=None, axis=-1, norm=None): 71 a = ivy.array(a, dtype=ivy.complex128) 72 if norm is None: 73 norm = "backward" 74 return ivy.ifft(a, axis, norm=norm, n=n) 75 76 77 @with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy") 78 @to_ivy_arrays_and_back 79 def ifftn(a, s=None, axes=None, norm=None): 80 a = ivy.asarray(a, dtype=ivy.complex128) 81 a = ivy.ifftn(a, s=s, axes=axes, norm=norm) 82 return a 83 84 85 @to_ivy_arrays_and_back 86 @with_unsupported_dtypes({"1.26.0 and below": ("float16",)}, "numpy") 87 def ifftshift(x, axes=None): 88 x = ivy.asarray(x) 89 90 if axes is None: 91 axes = tuple(range(x.ndim)) 92 shift = [-(dim // 2) for dim in x.shape] 93 elif isinstance( 94 axes, 95 (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)), 96 ): 97 shift = -(x.shape[axes] // 2) 98 else: 99 shift = [-(x.shape[ax] // 2) for ax in axes] 100 101 roll = ivy.roll(x, shift, axis=axes) 102 103 return roll 104 105 106 @with_unsupported_dtypes({"1.26.0 and below": ("float16",)}, "numpy") 107 @to_ivy_arrays_and_back 108 def ihfft(a, n=None, axis=-1, norm=None): 109 if n is None: 110 n = a.shape[axis] 111 norm = _swap_direction(norm) 112 output = ivy.conj(rfft(a, n, axis, norm=norm).ivy_array) 113 return output 114 115 116 @with_unsupported_dtypes({"1.26.0 and below": ("float16",)}, "numpy") 117 @to_ivy_arrays_and_back 118 def rfft(a, n=None, axis=-1, norm=None): 119 if norm is None: 120 norm = "backward" 121 a = ivy.array(a, dtype=ivy.float64) 122 return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm) 123 124 125 @to_ivy_arrays_and_back 126 def rfftfreq(n, d=1.0): 127 if 
not isinstance( 128 n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64)) 129 ): 130 raise ValueError("n should be an integer") 131 132 val = 1.0 / (n * d) 133 N = n // 2 + 1 134 results = ivy.arange(0, N, dtype=int) 135 return results * val 136 137 138 @with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy") 139 @to_ivy_arrays_and_back 140 def rfftn(a, s=None, axes=None, norm=None): 141 a = ivy.asarray(a, dtype=ivy.complex128) 142 return ivy.rfftn(a, s=s, axes=axes, norm=norm) 143 144 145 _SWAP_DIRECTION_MAP = { 146 None: "forward", 147 "backward": "forward", 148 "ortho": "ortho", 149 "forward": "backward", 150 } 151 [end of ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py --- a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py +++ b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py @@ -74,6 +74,14 @@ return ivy.ifft(a, axis, norm=norm, n=n) +@with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy") +@to_ivy_arrays_and_back +def ifft2(a, s=None, axes=(-2, -1), norm=None): + a = ivy.asarray(a, dtype=ivy.complex128) + a = ivy.ifftn(a, s=s, axes=axes, norm=norm) + return a + + @with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy") @to_ivy_arrays_and_back def ifftn(a, s=None, axes=None, norm=None):
{"golden_diff": "diff --git a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py\n--- a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py\n+++ b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py\n@@ -74,6 +74,14 @@\n return ivy.ifft(a, axis, norm=norm, n=n)\n \n \n+@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n+@to_ivy_arrays_and_back\n+def ifft2(a, s=None, axes=(-2, -1), norm=None):\n+ a = ivy.asarray(a, dtype=ivy.complex128)\n+ a = ivy.ifftn(a, s=s, axes=axes, norm=norm)\n+ return a\n+\n+\n @with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n @to_ivy_arrays_and_back\n def ifftn(a, s=None, axes=None, norm=None):\n", "issue": "ifft2\n\nifft2\n\n", "before_files": [{"content": "import ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n# --- Helpers --- #\n# --------------- #\n\n\ndef _swap_direction(norm):\n try:\n return _SWAP_DIRECTION_MAP[norm]\n except KeyError:\n raise ValueError(\n f'Invalid norm value {norm}; should be \"backward\", \"ortho\" or \"forward\".'\n ) from None\n\n\n# --- Main --- #\n# ------------ #\n\n\n@to_ivy_arrays_and_back\ndef fft(a, n=None, axis=-1, norm=None):\n return ivy.fft(ivy.astype(a, ivy.complex128), axis, norm=norm, n=n)\n\n\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"int\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef fftfreq(n, d=1.0):\n if not isinstance(\n n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))\n ):\n raise ValueError(\"n should be an integer\")\n\n N = (n - 1) // 2 + 1\n val = 1.0 / (n * d)\n results = ivy.empty(tuple([n]), dtype=int)\n\n p1 = ivy.arange(0, N, dtype=int)\n results[:N] = p1\n p2 = ivy.arange(-(n // 2), 0, dtype=int)\n results[N:] = p2\n\n return results * val\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"float16\",)}, \"numpy\")\ndef fftshift(x, axes=None):\n x = ivy.asarray(x)\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [(dim // 2) for dim in x.shape]\n elif isinstance(\n axes,\n (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),\n ):\n shift = x.shape[axes] // 2\n else:\n shift = [(x.shape[ax] // 2) for ax in axes]\n\n roll = ivy.roll(x, shift, axis=axes)\n\n return roll\n\n\n@to_ivy_arrays_and_back\ndef ifft(a, n=None, axis=-1, norm=None):\n a = ivy.array(a, dtype=ivy.complex128)\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n\n\n@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef ifftn(a, s=None, axes=None, norm=None):\n a = ivy.asarray(a, dtype=ivy.complex128)\n a = ivy.ifftn(a, s=s, axes=axes, norm=norm)\n return a\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"float16\",)}, \"numpy\")\ndef ifftshift(x, axes=None):\n x = ivy.asarray(x)\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [-(dim // 2) for dim in x.shape]\n elif isinstance(\n axes,\n (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),\n ):\n shift = -(x.shape[axes] // 2)\n else:\n shift = [-(x.shape[ax] // 2) for ax in axes]\n\n roll = ivy.roll(x, shift, axis=axes)\n\n return roll\n\n\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef ihfft(a, n=None, axis=-1, norm=None):\n if 
n is None:\n n = a.shape[axis]\n norm = _swap_direction(norm)\n output = ivy.conj(rfft(a, n, axis, norm=norm).ivy_array)\n return output\n\n\n@with_unsupported_dtypes({\"1.26.0 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef rfft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n a = ivy.array(a, dtype=ivy.float64)\n return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)\n\n\n@to_ivy_arrays_and_back\ndef rfftfreq(n, d=1.0):\n if not isinstance(\n n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))\n ):\n raise ValueError(\"n should be an integer\")\n\n val = 1.0 / (n * d)\n N = n // 2 + 1\n results = ivy.arange(0, N, dtype=int)\n return results * val\n\n\n@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef rfftn(a, s=None, axes=None, norm=None):\n a = ivy.asarray(a, dtype=ivy.complex128)\n return ivy.rfftn(a, s=s, axes=axes, norm=norm)\n\n\n_SWAP_DIRECTION_MAP = {\n None: \"forward\",\n \"backward\": \"forward\",\n \"ortho\": \"ortho\",\n \"forward\": \"backward\",\n}\n", "path": "ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py"}]}
2,226
257
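
For reference, a minimal sketch of how the `ifft2` wrapper added by the golden diff above could be exercised once the patch is applied. Only the `ifft2(a, s=None, axes=(-2, -1), norm=None)` signature and the `.ivy_array` unwrapping convention come from this row; the backend-selection call and the test values are assumptions for illustration, not part of the dataset entry.

```python
# Illustrative check of the ifft2 frontend added by the golden diff above
# (assumes the patch is applied and ivy's NumPy backend is available).
import numpy as np
import ivy
from ivy.functional.frontends.numpy.fft import discrete_fourier_transform as dft

ivy.set_backend("numpy")  # assumed helper for selecting the backend

a = np.arange(16.0).reshape(4, 4)

res = dft.ifft2(a)          # delegates to ivy.ifftn over axes (-2, -1)
expected = np.fft.ifft2(a)  # NumPy reference result

# The frontend result wraps an ivy array, mirroring the .ivy_array usage in ihfft.
print(np.allclose(ivy.to_numpy(res.ivy_array), expected))
```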
gh_patches_debug_21336
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-2222
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add correct size checking to IAM ManagedPolicy resource types *cfn-lint version: (cfn-lint 0.58.1)* *Description of issue.* The PolicyDocument specified in an IAM Managed Policy has a maximum length of 6,144 characters (after converted to JSON and minified). https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html When creating through CloudFormation an AWS::IAM::ManagedPolicy and using conditionals to leverage a 3 level environment creation differentiation, is throwing an error **E3502 PolicyDocument JSON text cannot be longer than 6144 characters**. That happens because the policy defined in CloudFormation with the 3 level conditionals exceeds the 6144 characters limit, but when it is created it only uses 1 of the paths in the conditionals so the size is not really over 6144 characters. This is the sample code in CloudFormation: ``` ResourcePolicy: Type: AWS::IAM::ManagedPolicy Properties: ManagedPolicyName: !Sub - ResourcePolicy PolicyDocument: Version: '2012-10-17' Statement: - Sid: LambdaExecutionLogs Effect: Allow Action: - logs:CreateLogGroup - logs:CreateLogStream - logs:PutLogEvents Resource: !Split - "," - !Join - "," - - !If - IsDev - !Join - "," - - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev1-${EnvNameCamelCase} - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev1-:* - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev2 - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev2:* - !Ref AWS::NoValue - !If - IsUat - !Join - "," - - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat1 - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat1:* - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat2 - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat2:* - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat3 - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat3:* - !Ref AWS::NoValue - !If - IsProd - !Join - "," - - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdaprod - !Sub arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdaprod:* - !Ref AWS::NoValue - Sid: ........ . . . ``` So the missing feature is to be able to check the size of the policy which is going to be finally created, not the source CloudFormation code, so in that regard it needs to evaluate the parameter and resolve the conditionals to end up with the final resource code to be created. </issue> <code> [start of src/cfnlint/rules/resources/properties/JsonSize.py] 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 SPDX-License-Identifier: MIT-0 4 """ 5 import datetime 6 import json 7 import re 8 import six 9 import cfnlint.helpers 10 from cfnlint.rules import CloudFormationLintRule 11 from cfnlint.rules import RuleMatch 12 from cfnlint.helpers import RESOURCE_SPECS 13 14 15 class JsonSize(CloudFormationLintRule): 16 """Check if JSON Object Size is within the specified length""" 17 id = 'E3502' 18 shortdesc = 'Check if a JSON Object is within size limits' 19 description = 'Validate properties that are JSON values so that their length is within the limits' 20 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html' 21 tags = ['resources', 'limits', 'json'] 22 23 def initialize(self, cfn): 24 """Initialize the rule""" 25 for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'): 26 self.resource_property_types.append(resource_type_spec) 27 for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'): 28 self.resource_sub_property_types.append(property_type_spec) 29 30 def _serialize_date(self, obj): 31 if isinstance(obj, datetime.date): 32 return obj.isoformat() 33 raise TypeError('Object of type {} is not JSON serializable'.format(obj.__class__.__name__)) 34 35 def check_value(self, value, path, prop, cfn, specs): 36 """Check Role.AssumeRolePolicyDocument is within limits""" 37 matches = [] 38 39 def remove_functions(obj): 40 """ Replaces intrinsic functions with string """ 41 if isinstance(obj, dict): 42 new_obj = {} 43 if len(obj) == 1: 44 for k, v in obj.items(): 45 if k in cfnlint.helpers.FUNCTIONS: 46 if k == 'Fn::Sub': 47 if isinstance(v, six.string_types): 48 return re.sub(r'\${.*}', '', v) 49 if isinstance(v, list): 50 return re.sub(r'\${.*}', '', v[0]) 51 else: 52 new_obj[k] = remove_functions(v) 53 return new_obj 54 else: 55 for k, v in obj.items(): 56 new_obj[k] = remove_functions(v) 57 return new_obj 58 elif isinstance(obj, list): 59 new_list = [] 60 for v in obj: 61 new_list.append(remove_functions(v)) 62 return new_list 63 64 return obj 65 66 scenarios = cfn.get_object_without_nested_conditions(value, path) 67 json_max_size = specs.get('JsonMax') 68 for scenario in scenarios: 69 j = remove_functions(scenario['Object'][prop]) 70 if isinstance(j, six.string_types): 71 try: 72 j = json.loads(j) 73 except: #pylint: disable=bare-except 74 continue 75 if len(json.dumps(j, separators=(',', ':'), default=self._serialize_date)) > json_max_size: 76 if scenario['Scenario']: 77 message = '{0} JSON text cannot be longer than {1} characters when {2}' 78 scenario_text = ' and '.join(['when condition "%s" is %s' % ( 79 k, v) for (k, v) in scenario['Scenario'].items()]) 80 matches.append( 81 RuleMatch(path + [prop], message.format(prop, json_max_size, scenario_text))) 82 else: 83 message = '{0} JSON text cannot be longer than {1} characters' 84 matches.append( 85 RuleMatch( 86 path + [prop], 87 message.format(prop, json_max_size), 88 ) 89 ) 90 91 return matches 92 93 def check(self, cfn, properties, specs, path): 94 """Check itself""" 95 matches = [] 96 for p_value, p_path in properties.items_safe(path[:]): 97 for prop in p_value: 98 if prop in specs: 99 value = specs.get(prop).get('Value', {}) 100 if value: 101 value_type = value.get('ValueType', '') 102 primitive_type = specs.get(prop).get('PrimitiveType') 103 if primitive_type == 'Json': 104 matches.extend( 105 self.check_value( 106 p_value, p_path, prop, cfn, 107 RESOURCE_SPECS.get(cfn.regions[0]).get( 108 'ValueTypes').get(value_type, {}) 109 ) 110 ) 111 
return matches 112 113 def match_resource_sub_properties(self, properties, property_type, path, cfn): 114 """Match for sub properties""" 115 matches = list() 116 117 specs = RESOURCE_SPECS.get(cfn.regions[0]).get( 118 'PropertyTypes').get(property_type, {}).get('Properties', {}) 119 matches.extend(self.check(cfn, properties, specs, path)) 120 121 return matches 122 123 def match_resource_properties(self, properties, resource_type, path, cfn): 124 """Check CloudFormation Properties""" 125 matches = list() 126 127 specs = RESOURCE_SPECS.get(cfn.regions[0]).get( 128 'ResourceTypes').get(resource_type, {}).get('Properties', {}) 129 matches.extend(self.check(cfn, properties, specs, path)) 130 131 return matches 132 [end of src/cfnlint/rules/resources/properties/JsonSize.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/rules/resources/properties/JsonSize.py b/src/cfnlint/rules/resources/properties/JsonSize.py --- a/src/cfnlint/rules/resources/properties/JsonSize.py +++ b/src/cfnlint/rules/resources/properties/JsonSize.py @@ -36,6 +36,7 @@ """Check Role.AssumeRolePolicyDocument is within limits""" matches = [] + #pylint: disable=too-many-return-statements def remove_functions(obj): """ Replaces intrinsic functions with string """ if isinstance(obj, dict): @@ -48,6 +49,8 @@ return re.sub(r'\${.*}', '', v) if isinstance(v, list): return re.sub(r'\${.*}', '', v[0]) + else: + return '' else: new_obj[k] = remove_functions(v) return new_obj
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/JsonSize.py b/src/cfnlint/rules/resources/properties/JsonSize.py\n--- a/src/cfnlint/rules/resources/properties/JsonSize.py\n+++ b/src/cfnlint/rules/resources/properties/JsonSize.py\n@@ -36,6 +36,7 @@\n \"\"\"Check Role.AssumeRolePolicyDocument is within limits\"\"\"\n matches = []\n \n+ #pylint: disable=too-many-return-statements\n def remove_functions(obj):\n \"\"\" Replaces intrinsic functions with string \"\"\"\n if isinstance(obj, dict):\n@@ -48,6 +49,8 @@\n return re.sub(r'\\${.*}', '', v)\n if isinstance(v, list):\n return re.sub(r'\\${.*}', '', v[0])\n+ else:\n+ return ''\n else:\n new_obj[k] = remove_functions(v)\n return new_obj\n", "issue": "Add correct size checking to IAM ManagedPolicy resource types\n*cfn-lint version: (cfn-lint 0.58.1)*\r\n\r\n*Description of issue.*\r\n\r\nThe PolicyDocument specified in an IAM Managed Policy has a maximum length of 6,144 characters (after converted to JSON and minified).\r\nhttps://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html\r\n\r\nWhen creating through CloudFormation an AWS::IAM::ManagedPolicy and using conditionals to leverage a 3 level environment creation differentiation, is throwing an error **E3502 PolicyDocument JSON text cannot be longer than 6144 characters**.\r\n\r\nThat happens because the policy defined in CloudFormation with the 3 level conditionals exceeds the 6144 characters limit, but when it is created it only uses 1 of the paths in the conditionals so the size is not really over 6144 characters.\r\n\r\nThis is the sample code in CloudFormation:\r\n```\r\n ResourcePolicy:\r\n Type: AWS::IAM::ManagedPolicy\r\n Properties:\r\n ManagedPolicyName: !Sub\r\n - ResourcePolicy\r\n PolicyDocument:\r\n Version: '2012-10-17'\r\n Statement:\r\n - Sid: LambdaExecutionLogs\r\n Effect: Allow\r\n Action:\r\n - logs:CreateLogGroup\r\n - logs:CreateLogStream\r\n - logs:PutLogEvents\r\n Resource:\r\n !Split\r\n - \",\"\r\n - !Join\r\n - \",\"\r\n - - !If\r\n - IsDev\r\n - !Join\r\n - \",\"\r\n - \r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev1-${EnvNameCamelCase}\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev1-:*\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev2\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdadev2:*\r\n - !Ref AWS::NoValue\r\n - !If\r\n - IsUat\r\n - !Join\r\n - \",\"\r\n - \r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat1\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat1:*\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat2\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat2:*\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat3\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdauat3:*\r\n - !Ref AWS::NoValue\r\n - !If\r\n - IsProd\r\n - !Join\r\n - \",\"\r\n - \r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdaprod\r\n - !Sub\r\n arn:aws:logs:${AWS::Region}:${AWS::AccountId}:log-group:/aws/lambda/lambdaprod:*\r\n - !Ref AWS::NoValue\r\n - Sid: ........\r\n . . 
.\r\n```\r\n\r\nSo the missing feature is to be able to check the size of the policy which is going to be finally created, not the source CloudFormation code, so in that regard it needs to evaluate the parameter and resolve the conditionals to end up with the final resource code to be created.\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport datetime\nimport json\nimport re\nimport six\nimport cfnlint.helpers\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\nfrom cfnlint.helpers import RESOURCE_SPECS\n\n\nclass JsonSize(CloudFormationLintRule):\n \"\"\"Check if JSON Object Size is within the specified length\"\"\"\n id = 'E3502'\n shortdesc = 'Check if a JSON Object is within size limits'\n description = 'Validate properties that are JSON values so that their length is within the limits'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html'\n tags = ['resources', 'limits', 'json']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n\n def _serialize_date(self, obj):\n if isinstance(obj, datetime.date):\n return obj.isoformat()\n raise TypeError('Object of type {} is not JSON serializable'.format(obj.__class__.__name__))\n\n def check_value(self, value, path, prop, cfn, specs):\n \"\"\"Check Role.AssumeRolePolicyDocument is within limits\"\"\"\n matches = []\n\n def remove_functions(obj):\n \"\"\" Replaces intrinsic functions with string \"\"\"\n if isinstance(obj, dict):\n new_obj = {}\n if len(obj) == 1:\n for k, v in obj.items():\n if k in cfnlint.helpers.FUNCTIONS:\n if k == 'Fn::Sub':\n if isinstance(v, six.string_types):\n return re.sub(r'\\${.*}', '', v)\n if isinstance(v, list):\n return re.sub(r'\\${.*}', '', v[0])\n else:\n new_obj[k] = remove_functions(v)\n return new_obj\n else:\n for k, v in obj.items():\n new_obj[k] = remove_functions(v)\n return new_obj\n elif isinstance(obj, list):\n new_list = []\n for v in obj:\n new_list.append(remove_functions(v))\n return new_list\n\n return obj\n\n scenarios = cfn.get_object_without_nested_conditions(value, path)\n json_max_size = specs.get('JsonMax')\n for scenario in scenarios:\n j = remove_functions(scenario['Object'][prop])\n if isinstance(j, six.string_types):\n try:\n j = json.loads(j)\n except: #pylint: disable=bare-except\n continue\n if len(json.dumps(j, separators=(',', ':'), default=self._serialize_date)) > json_max_size:\n if scenario['Scenario']:\n message = '{0} JSON text cannot be longer than {1} characters when {2}'\n scenario_text = ' and '.join(['when condition \"%s\" is %s' % (\n k, v) for (k, v) in scenario['Scenario'].items()])\n matches.append(\n RuleMatch(path + [prop], message.format(prop, json_max_size, scenario_text)))\n else:\n message = '{0} JSON text cannot be longer than {1} characters'\n matches.append(\n RuleMatch(\n path + [prop],\n message.format(prop, json_max_size),\n )\n )\n\n return matches\n\n def check(self, cfn, properties, specs, path):\n \"\"\"Check itself\"\"\"\n matches = []\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in specs:\n value = 
specs.get(prop).get('Value', {})\n if value:\n value_type = value.get('ValueType', '')\n primitive_type = specs.get(prop).get('PrimitiveType')\n if primitive_type == 'Json':\n matches.extend(\n self.check_value(\n p_value, p_path, prop, cfn,\n RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'ValueTypes').get(value_type, {})\n )\n )\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'PropertyTypes').get(property_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'ResourceTypes').get(resource_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/JsonSize.py"}]}
2,819
197
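
The golden diff for this row adds an `else: return ''` branch so that an `Fn::Sub` whose value is neither a plain string nor a list collapses to an empty string inside `remove_functions`, instead of falling through. The underlying check is the minified-JSON measurement in `JsonSize.check_value`; a standalone sketch of that measurement follows, with an invented policy document, account ID, and region. The 6,144-character limit is the managed-policy quota quoted in the issue.

```python
# Stand-in for the measurement E3502 performs in JsonSize.check_value: the policy
# document is minified with json.dumps(..., separators=(',', ':')) and its length
# is compared against the spec's JsonMax. The policy below is made up.
import json

policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Sid": "LambdaExecutionLogs",
        "Effect": "Allow",
        "Action": ["logs:CreateLogGroup", "logs:CreateLogStream", "logs:PutLogEvents"],
        "Resource": ["arn:aws:logs:eu-west-1:123456789012:log-group:/aws/lambda/example:*"],
    }],
}

JSON_MAX = 6144  # managed-policy document limit cited in the issue
minified = json.dumps(policy, separators=(",", ":"))
print(len(minified), "characters; over limit:", len(minified) > JSON_MAX)
```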