problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info | num_tokens_prompt | num_tokens_diff
---|---|---|---|---|---|---|---|---
stringlengths 18-22 | stringclasses 1 value | stringclasses 1 value | stringlengths 13-58 | stringlengths 1.71k-9.01k | stringlengths 151-4.94k | stringlengths 465-11.3k | int64 557-2.05k | int64 48-1.02k
gh_patches_debug_49728 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-6027 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Please cut a release of Video Intelligence
Need to unblock tests of samples
</issue>
<code>
[start of videointelligence/setup.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = 'google-cloud-videointelligence'
24 description = 'Google Cloud Video Intelligence API client library'
25 version = '1.3.0'
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = 'Development Status :: 5 - Production/Stable'
31 dependencies = [
32 'google-api-core[grpc]<2.0.0dev,>=0.1.0',
33 ]
34 extras = {
35 }
36
37
38 # Setup boilerplate below this line.
39
40 package_root = os.path.abspath(os.path.dirname(__file__))
41
42 readme_filename = os.path.join(package_root, 'README.rst')
43 with io.open(readme_filename, encoding='utf-8') as readme_file:
44 readme = readme_file.read()
45
46 # Only include packages under the 'google' namespace. Do not include tests,
47 # benchmarks, etc.
48 packages = [
49 package for package in setuptools.find_packages()
50 if package.startswith('google')]
51
52 # Determine which namespaces are needed.
53 namespaces = ['google']
54 if 'google.cloud' in packages:
55 namespaces.append('google.cloud')
56
57
58 setuptools.setup(
59 name=name,
60 version=version,
61 description=description,
62 long_description=readme,
63 author='Google LLC',
64 author_email='[email protected]',
65 license='Apache 2.0',
66 url='https://github.com/GoogleCloudPlatform/google-cloud-python',
67 classifiers=[
68 release_status,
69 'Intended Audience :: Developers',
70 'License :: OSI Approved :: Apache Software License',
71 'Programming Language :: Python',
72 'Programming Language :: Python :: 2',
73 'Programming Language :: Python :: 2.7',
74 'Programming Language :: Python :: 3',
75 'Programming Language :: Python :: 3.4',
76 'Programming Language :: Python :: 3.5',
77 'Programming Language :: Python :: 3.6',
78 'Operating System :: OS Independent',
79 'Topic :: Internet',
80 ],
81 platforms='Posix; MacOS X; Windows',
82 packages=packages,
83 namespace_packages=namespaces,
84 install_requires=dependencies,
85 extras_require=extras,
86 include_package_data=True,
87 zip_safe=False,
88 )
89
[end of videointelligence/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/videointelligence/setup.py b/videointelligence/setup.py
--- a/videointelligence/setup.py
+++ b/videointelligence/setup.py
@@ -22,7 +22,7 @@
name = 'google-cloud-videointelligence'
description = 'Google Cloud Video Intelligence API client library'
-version = '1.3.0'
+version = '1.4.0'
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
| {"golden_diff": "diff --git a/videointelligence/setup.py b/videointelligence/setup.py\n--- a/videointelligence/setup.py\n+++ b/videointelligence/setup.py\n@@ -22,7 +22,7 @@\n \n name = 'google-cloud-videointelligence'\n description = 'Google Cloud Video Intelligence API client library'\n-version = '1.3.0'\n+version = '1.4.0'\n # Should be one of:\n # 'Development Status :: 3 - Alpha'\n # 'Development Status :: 4 - Beta'\n", "issue": "Please cut a release of Video Intelligence\nNeed to unblock tests of samples\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = 'google-cloud-videointelligence'\ndescription = 'Google Cloud Video Intelligence API client library'\nversion = '1.3.0'\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = 'Development Status :: 5 - Production/Stable'\ndependencies = [\n 'google-api-core[grpc]<2.0.0dev,>=0.1.0',\n]\nextras = {\n}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')]\n\n# Determine which namespaces are needed.\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "videointelligence/setup.py"}]} | 1,342 | 116 |
gh_patches_debug_54621 | rasdani/github-patches | git_diff | ibis-project__ibis-4790 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
docs: infinite build when using `mkdocs serve`
It appears that when using `mkdocs serve` the docs are repeatedly rebuilt to no end.
I suspect there's a file that we're generating (maybe the operation matrix?) that is being considered new and triggering a rebuild.
</issue>
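A minimal sketch of the write-only-if-changed pattern that stops a watcher-driven rebuild loop, assuming the generated support matrix CSV is the trigger (the path handling mirrors `gen_matrix.py` below; everything else is illustrative):

```python
# Sketch: skip rewriting the CSV when its content is unchanged, so the
# file's mtime stays put and `mkdocs serve` sees nothing new to rebuild.
from pathlib import Path

import pandas as pd


def write_if_changed(table: pd.DataFrame, dst: Path) -> None:
    if dst.exists():
        old = pd.read_csv(dst, index_col="Backends")
        if old.equals(table):
            return  # identical content: leave the file untouched
    table.to_csv(dst, index_label="Backends")
```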
<code>
[start of gen_matrix.py]
1 from pathlib import Path
2
3 import pandas as pd
4 import tomli
5
6 import ibis
7 import ibis.expr.operations as ops
8
9
10 def get_backends():
11 pyproject = tomli.loads(Path("pyproject.toml").read_text())
12 backends = pyproject["tool"]["poetry"]["plugins"]["ibis.backends"]
13 del backends["spark"]
14 return [(backend, getattr(ibis, backend)) for backend in sorted(backends.keys())]
15
16
17 def get_leaf_classes(op):
18 for child_class in op.__subclasses__():
19 if not child_class.__subclasses__():
20 yield child_class
21 else:
22 yield from get_leaf_classes(child_class)
23
24
25 EXCLUDED_OPS = {
26 # Never translates into anything
27 ops.UnresolvedExistsSubquery,
28 ops.UnresolvedNotExistsSubquery,
29 ops.ScalarParameter,
30 }
31
32 INCLUDED_OPS = {
33 # Parent class of MultiQuantile so it's ignored by `get_backends()`
34 ops.Quantile,
35 }
36
37
38 ICONS = {
39 True: ":material-check-decagram:{ .verified }",
40 False: ":material-cancel:{ .cancel }",
41 }
42
43
44 def main():
45 possible_ops = (
46 frozenset(get_leaf_classes(ops.Value)) | INCLUDED_OPS
47 ) - EXCLUDED_OPS
48
49 support = {"operation": [f"`{op.__name__}`" for op in possible_ops]}
50 support.update(
51 (name, list(map(backend.has_operation, possible_ops)))
52 for name, backend in get_backends()
53 )
54
55 df = pd.DataFrame(support).set_index("operation").sort_index()
56
57 counts = df.sum().sort_values(ascending=False)
58 num_ops = len(possible_ops)
59 coverage = (
60 counts.map(lambda n: f"_{n} ({round(100 * n / num_ops)}%)_")
61 .to_frame(name="**API Coverage**")
62 .T
63 )
64
65 ops_table = df.loc[:, counts.index].replace(ICONS)
66 table = pd.concat([coverage, ops_table])
67 dst = Path(__file__).parent.joinpath(
68 "docs",
69 "backends",
70 "support_matrix.csv",
71 )
72 table.to_csv(dst, index_label="Backends")
73
74
75 main()
76
[end of gen_matrix.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gen_matrix.py b/gen_matrix.py
--- a/gen_matrix.py
+++ b/gen_matrix.py
@@ -69,7 +69,15 @@
"backends",
"support_matrix.csv",
)
- table.to_csv(dst, index_label="Backends")
+
+ if dst.exists():
+ old = pd.read_csv(dst, index_col="Backends")
+ should_write = not old.equals(table)
+ else:
+ should_write = True
+
+ if should_write:
+ table.to_csv(dst, index_label="Backends")
main()
| {"golden_diff": "diff --git a/gen_matrix.py b/gen_matrix.py\n--- a/gen_matrix.py\n+++ b/gen_matrix.py\n@@ -69,7 +69,15 @@\n \"backends\",\n \"support_matrix.csv\",\n )\n- table.to_csv(dst, index_label=\"Backends\")\n+\n+ if dst.exists():\n+ old = pd.read_csv(dst, index_col=\"Backends\")\n+ should_write = not old.equals(table)\n+ else:\n+ should_write = True\n+\n+ if should_write:\n+ table.to_csv(dst, index_label=\"Backends\")\n \n \n main()\n", "issue": "docs: infinite build when using `mkdocs serve`\nIt appears that when using `mkdocs serve` the docs are repeatedly rebuilt to no end.\r\n\r\nI suspect there's a file that we're generating (maybe the operation matrix?) that is being considered new and triggering a rebuild.\n", "before_files": [{"content": "from pathlib import Path\n\nimport pandas as pd\nimport tomli\n\nimport ibis\nimport ibis.expr.operations as ops\n\n\ndef get_backends():\n pyproject = tomli.loads(Path(\"pyproject.toml\").read_text())\n backends = pyproject[\"tool\"][\"poetry\"][\"plugins\"][\"ibis.backends\"]\n del backends[\"spark\"]\n return [(backend, getattr(ibis, backend)) for backend in sorted(backends.keys())]\n\n\ndef get_leaf_classes(op):\n for child_class in op.__subclasses__():\n if not child_class.__subclasses__():\n yield child_class\n else:\n yield from get_leaf_classes(child_class)\n\n\nEXCLUDED_OPS = {\n # Never translates into anything\n ops.UnresolvedExistsSubquery,\n ops.UnresolvedNotExistsSubquery,\n ops.ScalarParameter,\n}\n\nINCLUDED_OPS = {\n # Parent class of MultiQuantile so it's ignored by `get_backends()`\n ops.Quantile,\n}\n\n\nICONS = {\n True: \":material-check-decagram:{ .verified }\",\n False: \":material-cancel:{ .cancel }\",\n}\n\n\ndef main():\n possible_ops = (\n frozenset(get_leaf_classes(ops.Value)) | INCLUDED_OPS\n ) - EXCLUDED_OPS\n\n support = {\"operation\": [f\"`{op.__name__}`\" for op in possible_ops]}\n support.update(\n (name, list(map(backend.has_operation, possible_ops)))\n for name, backend in get_backends()\n )\n\n df = pd.DataFrame(support).set_index(\"operation\").sort_index()\n\n counts = df.sum().sort_values(ascending=False)\n num_ops = len(possible_ops)\n coverage = (\n counts.map(lambda n: f\"_{n} ({round(100 * n / num_ops)}%)_\")\n .to_frame(name=\"**API Coverage**\")\n .T\n )\n\n ops_table = df.loc[:, counts.index].replace(ICONS)\n table = pd.concat([coverage, ops_table])\n dst = Path(__file__).parent.joinpath(\n \"docs\",\n \"backends\",\n \"support_matrix.csv\",\n )\n table.to_csv(dst, index_label=\"Backends\")\n\n\nmain()\n", "path": "gen_matrix.py"}]} | 1,220 | 130 |
gh_patches_debug_36560 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-2099 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: day_switch_time does not seem to be working correctly
### I Have A Problem With:
The integration in general
### What's Your Problem
I have day switch time set to `20:00` but the day switches at `01:19`
<img width="228" alt="Screenshot 2024-05-08 at 07 24 31" src="https://github.com/mampfes/hacs_waste_collection_schedule/assets/49797976/c84d1086-1fd8-462a-a206-77ed846838a0">
config:
```
waste_collection_schedule:
sources:
- name: maldon_gov_uk
args:
uprn: "uprn"
customize:
- type: Refuse Collection
- type: Recycling
day_switch_time: "20:00"
fetch_time: 01:00
```
### Source (if relevant)
Maldon District Council / maldon.gov.uk
### Logs
_No response_
### Relevant Configuration
```YAML
waste_collection_schedule:
sources:
- name: maldon_gov_uk
args:
uprn: "uprn"
customize:
- type: Refuse Collection
- type: Recycling
day_switch_time: "20:00"
fetch_time: 01:00
```
### Checklist Source Error
- [ ] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)
- [X] Checked that the website of your service provider is still working
- [ ] Tested my attributes on the service provider website (if possible)
- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version)
### Checklist Sensor Error
- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)
### Required
- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.
- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.
</issue>
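One plausible contributor, sketched under the assumption that the source collapses the two dates printed per collection panel into a single one: if only one date survives parsing, the upcoming event can be wrong no matter what `day_switch_time` is set to. Returning every parsed date (regex and date format as in the source below) lets the integration pick the correct next collection:

```python
# Sketch: keep every dd/mm/YYYY date found in a panel instead of
# collapsing to one. The sample text is illustrative, not real output.
import re
from datetime import datetime


def extract_dates(text: str) -> list:
    found = re.findall(r"\d{2}/\d{2}/\d{4}", text)
    return [datetime.strptime(d, "%d/%m/%Y").date() for d in found]


print(extract_dates("Last: 01/05/2024 Next: 15/05/2024"))
# [datetime.date(2024, 5, 1), datetime.date(2024, 5, 15)]
```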
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/maldon_gov_uk.py]
1 import re
2 from datetime import datetime
3
4 import requests
5 from bs4 import BeautifulSoup
6 from waste_collection_schedule import Collection
7
8 TITLE = "Maldon District Council"
9
10 DESCRIPTION = ("Source for www.maldon.gov.uk services for Maldon, UK")
11
12 URL = "https://www.maldon.gov.uk/"
13
14 TEST_CASES = {
15 "test 1": {"uprn": "200000917928"},
16 "test 2": {"uprn": "100091258454"},
17 }
18
19 API_URL = "https://maldon.suez.co.uk/maldon/ServiceSummary?uprn="
20
21 ICON_MAP = {
22 "Refuse Collection": "mdi:trash-can",
23 "Recycling": "mdi:recycle",
24 "Green": "mdi:leaf",
25 "Food": "mdi:food-apple",
26 }
27
28 class Source:
29 def __init__(self, uprn: str):
30 self._uprn = uprn
31
32 def _extract_future_date(self, text):
33 # parse both dates and return the future one
34 dates = re.findall(r'\d{2}/\d{2}/\d{4}', text)
35 dates = [datetime.strptime(date, '%d/%m/%Y').date() for date in dates]
36 return max(dates)
37
38 def fetch(self):
39 entries = []
40
41 session = requests.Session()
42
43 r = session.get(f"{API_URL}{self._uprn}")
44 soup = BeautifulSoup(r.text, features="html.parser")
45 collections = soup.find_all("div", {"class": "panel-default"})
46
47 if not collections:
48 raise Exception("No collections found for given UPRN")
49
50 for collection in collections:
51 # check is a collection row
52 title = collection.find("h2", {"class": "panel-title"}).text.strip()
53
54 if title == "Other Services" or "You are not currently subscribed" in collection.text:
55 continue
56
57 entries.append(
58 Collection(
59 date=self._extract_future_date(collection.text),
60 t=title,
61 icon=ICON_MAP.get(title),
62 )
63 )
64
65 return entries
66
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/maldon_gov_uk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/maldon_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/maldon_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/maldon_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/maldon_gov_uk.py
@@ -3,17 +3,17 @@
import requests
from bs4 import BeautifulSoup
-from waste_collection_schedule import Collection
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
TITLE = "Maldon District Council"
-DESCRIPTION = ("Source for www.maldon.gov.uk services for Maldon, UK")
+DESCRIPTION = "Source for www.maldon.gov.uk services for Maldon, UK"
URL = "https://www.maldon.gov.uk/"
TEST_CASES = {
"test 1": {"uprn": "200000917928"},
- "test 2": {"uprn": "100091258454"},
+ "test 2": {"uprn": 100091258454},
}
API_URL = "https://maldon.suez.co.uk/maldon/ServiceSummary?uprn="
@@ -25,15 +25,15 @@
"Food": "mdi:food-apple",
}
+
class Source:
def __init__(self, uprn: str):
self._uprn = uprn
- def _extract_future_date(self, text):
+ def _extract_dates(self, text):
# parse both dates and return the future one
- dates = re.findall(r'\d{2}/\d{2}/\d{4}', text)
- dates = [datetime.strptime(date, '%d/%m/%Y').date() for date in dates]
- return max(dates)
+ dates = re.findall(r"\d{2}/\d{2}/\d{4}", text)
+ return [datetime.strptime(date, "%d/%m/%Y").date() for date in dates]
def fetch(self):
entries = []
@@ -51,15 +51,19 @@
# check is a collection row
title = collection.find("h2", {"class": "panel-title"}).text.strip()
- if title == "Other Services" or "You are not currently subscribed" in collection.text:
+ if (
+ title == "Other Services"
+ or "You are not currently subscribed" in collection.text
+ ):
continue
- entries.append(
- Collection(
- date=self._extract_future_date(collection.text),
- t=title,
- icon=ICON_MAP.get(title),
+ for date in self._extract_dates(collection.text):
+ entries.append(
+ Collection(
+ date=date,
+ t=title,
+ icon=ICON_MAP.get(title),
+ )
)
- )
return entries
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/maldon_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/maldon_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/maldon_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/maldon_gov_uk.py\n@@ -3,17 +3,17 @@\n \n import requests\n from bs4 import BeautifulSoup\n-from waste_collection_schedule import Collection\n+from waste_collection_schedule import Collection # type: ignore[attr-defined]\n \n TITLE = \"Maldon District Council\"\n \n-DESCRIPTION = (\"Source for www.maldon.gov.uk services for Maldon, UK\")\n+DESCRIPTION = \"Source for www.maldon.gov.uk services for Maldon, UK\"\n \n URL = \"https://www.maldon.gov.uk/\"\n \n TEST_CASES = {\n \"test 1\": {\"uprn\": \"200000917928\"},\n- \"test 2\": {\"uprn\": \"100091258454\"},\n+ \"test 2\": {\"uprn\": 100091258454},\n }\n \n API_URL = \"https://maldon.suez.co.uk/maldon/ServiceSummary?uprn=\"\n@@ -25,15 +25,15 @@\n \"Food\": \"mdi:food-apple\",\n }\n \n+\n class Source:\n def __init__(self, uprn: str):\n self._uprn = uprn\n \n- def _extract_future_date(self, text):\n+ def _extract_dates(self, text):\n # parse both dates and return the future one\n- dates = re.findall(r'\\d{2}/\\d{2}/\\d{4}', text)\n- dates = [datetime.strptime(date, '%d/%m/%Y').date() for date in dates]\n- return max(dates)\n+ dates = re.findall(r\"\\d{2}/\\d{2}/\\d{4}\", text)\n+ return [datetime.strptime(date, \"%d/%m/%Y\").date() for date in dates]\n \n def fetch(self):\n entries = []\n@@ -51,15 +51,19 @@\n # check is a collection row\n title = collection.find(\"h2\", {\"class\": \"panel-title\"}).text.strip()\n \n- if title == \"Other Services\" or \"You are not currently subscribed\" in collection.text:\n+ if (\n+ title == \"Other Services\"\n+ or \"You are not currently subscribed\" in collection.text\n+ ):\n continue\n \n- entries.append(\n- Collection(\n- date=self._extract_future_date(collection.text),\n- t=title,\n- icon=ICON_MAP.get(title),\n+ for date in self._extract_dates(collection.text):\n+ entries.append(\n+ Collection(\n+ date=date,\n+ t=title,\n+ icon=ICON_MAP.get(title),\n+ )\n )\n- )\n \n return entries\n", "issue": "[Bug]: day_switch_time does not seem to be working correctly\n### I Have A Problem With:\n\nThe integration in general\n\n### What's Your Problem\n\nI have day switch time set to `20:00` but the day switches at `01:19`\r\n\r\n<img width=\"228\" alt=\"Screenshot 2024-05-08 at 07 24 31\" src=\"https://github.com/mampfes/hacs_waste_collection_schedule/assets/49797976/c84d1086-1fd8-462a-a206-77ed846838a0\">\r\n\r\nconfig:\r\n\r\n```\r\nwaste_collection_schedule:\r\n sources:\r\n - name: maldon_gov_uk\r\n args:\r\n uprn: \"uprn\"\r\n customize:\r\n - type: Refuse Collection\r\n - type: Recycling\r\n day_switch_time: \"20:00\"\r\n fetch_time: 01:00\r\n```\r\n\n\n### Source (if relevant)\n\nMaldon District Council / maldon.gov.uk\n\n### Logs\n\n_No response_\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: maldon_gov_uk\r\n args:\r\n uprn: \"uprn\"\r\n customize:\r\n - type: Refuse Collection\r\n - type: Recycling\r\n day_switch_time: \"20:00\"\r\n fetch_time: 01:00\n```\n\n\n### Checklist Source Error\n\n- [ ] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] 
Checked that the website of your service provider is still working\n- [ ] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "import re\nfrom datetime import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Maldon District Council\"\n\nDESCRIPTION = (\"Source for www.maldon.gov.uk services for Maldon, UK\")\n\nURL = \"https://www.maldon.gov.uk/\"\n\nTEST_CASES = {\n \"test 1\": {\"uprn\": \"200000917928\"},\n \"test 2\": {\"uprn\": \"100091258454\"},\n}\n\nAPI_URL = \"https://maldon.suez.co.uk/maldon/ServiceSummary?uprn=\"\n\nICON_MAP = {\n \"Refuse Collection\": \"mdi:trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"Green\": \"mdi:leaf\",\n \"Food\": \"mdi:food-apple\",\n}\n\nclass Source:\n def __init__(self, uprn: str):\n self._uprn = uprn\n\n def _extract_future_date(self, text):\n # parse both dates and return the future one\n dates = re.findall(r'\\d{2}/\\d{2}/\\d{4}', text)\n dates = [datetime.strptime(date, '%d/%m/%Y').date() for date in dates]\n return max(dates)\n\n def fetch(self):\n entries = []\n\n session = requests.Session()\n\n r = session.get(f\"{API_URL}{self._uprn}\")\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n collections = soup.find_all(\"div\", {\"class\": \"panel-default\"})\n\n if not collections:\n raise Exception(\"No collections found for given UPRN\")\n\n for collection in collections:\n # check is a collection row\n title = collection.find(\"h2\", {\"class\": \"panel-title\"}).text.strip()\n\n if title == \"Other Services\" or \"You are not currently subscribed\" in collection.text:\n continue\n\n entries.append(\n Collection(\n date=self._extract_future_date(collection.text),\n t=title,\n icon=ICON_MAP.get(title),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/maldon_gov_uk.py"}]} | 1,699 | 679 |
gh_patches_debug_4220 | rasdani/github-patches | git_diff | freedomofpress__securedrop-6586 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clean up outdated references to Python 3.5
*This is a good first issue for new contributors to take on, if you have any questions, please ask on the task or in our [Gitter room](https://gitter.im/freedomofpress/securedrop)!*
## Description
SecureDrop now runs on focal, which uses Python 3.8. But there are still references to Python 3.5 that need to be cleaned up. Some should be dropped outright, others should be switched to 3.8.
Some examples:
```
$ rg python3\\.5
install_files/securedrop-grsec-focal/opt/securedrop/paxctld.conf
98:/usr/bin/python3.5 E
molecule/testinfra/vars/app-qubes-staging.yml
13:securedrop_venv_site_packages: "{{ securedrop_venv }}/lib/python3.5/site-packages"
molecule/testinfra/vars/prodVM.yml
12:securedrop_venv_site_packages: "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages"
install_files/ansible-base/roles/build-securedrop-app-code-deb-pkg/files/usr.sbin.apache2
71: /etc/python3.5/sitecustomize.py r,
109: /usr/local/lib/python3.5/dist-packages/ r,
117: /opt/venvs/securedrop-app-code/lib/python3.5/ r,
118: /opt/venvs/securedrop-app-code/lib/python3.5/** rm,
securedrop/scripts/rqrequeue
9:sys.path.insert(0, "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages") # noqa: E402
securedrop/scripts/shredder
14: 0, "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages"
securedrop/scripts/source_deleter
14: 0, "/opt/venvs/securedrop-app-code/lib/python3.5/site-packages"
$ rg 3\\.5 --type=py
molecule/builder-focal/tests/test_build_dependencies.py
6:SECUREDROP_PYTHON_VERSION = os.environ.get("SECUREDROP_PYTHON_VERSION", "3.5")
setup.py
14: python_requires=">=3.5",
```
</issue>
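A small sketch of the packaging side of this cleanup; the 3.8 floor comes from the issue text (focal ships Python 3.8) and the package name is a placeholder:

```python
# Sketch: lift the supported-Python floor in setuptools metadata.
import setuptools

setuptools.setup(
    name="example-package",   # placeholder, not the real package
    python_requires=">=3.8",  # replaces the stale ">=3.5"
)
```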
<code>
[start of setup.py]
1 import setuptools
2
3 long_description = "The SecureDrop whistleblower platform."
4
5 setuptools.setup(
6 name="securedrop-app-code",
7 version="2.5.0~rc1",
8 author="Freedom of the Press Foundation",
9 author_email="[email protected]",
10 description="SecureDrop Server",
11 long_description=long_description,
12 long_description_content_type="text/markdown",
13 license="AGPLv3+",
14 python_requires=">=3.5",
15 url="https://github.com/freedomofpress/securedrop",
16 classifiers=(
17 "Development Status :: 5 - Stable",
18 "Programming Language :: Python :: 3",
19 "Topic :: Software Development :: Libraries :: Python Modules",
20 "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
21 "Intended Audience :: Developers",
22 "Operating System :: OS Independent",
23 ),
24 )
25
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -11,7 +11,7 @@
long_description=long_description,
long_description_content_type="text/markdown",
license="AGPLv3+",
- python_requires=">=3.5",
+ python_requires=">=3.8",
url="https://github.com/freedomofpress/securedrop",
classifiers=(
"Development Status :: 5 - Stable",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,7 +11,7 @@\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license=\"AGPLv3+\",\n- python_requires=\">=3.5\",\n+ python_requires=\">=3.8\",\n url=\"https://github.com/freedomofpress/securedrop\",\n classifiers=(\n \"Development Status :: 5 - Stable\",\n", "issue": "Clean up outdated references to Python 3.5\n*This is a good first issue for new contributors to take on, if you have any questions, please ask on the task or in our [Gitter room](https://gitter.im/freedomofpress/securedrop)!*\r\n\r\n## Description\r\n\r\nSecureDrop now runs on focal, which uses Python 3.8. But there are still references to Python 3.5 that need to be cleaned up. Some should be dropped outright, others should be switched to 3.8.\r\n\r\n\r\nSome examples:\r\n```\r\n$ rg python3\\\\.5\r\ninstall_files/securedrop-grsec-focal/opt/securedrop/paxctld.conf\r\n98:/usr/bin/python3.5\t\tE\r\n\r\nmolecule/testinfra/vars/app-qubes-staging.yml\r\n13:securedrop_venv_site_packages: \"{{ securedrop_venv }}/lib/python3.5/site-packages\"\r\n\r\nmolecule/testinfra/vars/prodVM.yml\r\n12:securedrop_venv_site_packages: \"/opt/venvs/securedrop-app-code/lib/python3.5/site-packages\"\r\n\r\ninstall_files/ansible-base/roles/build-securedrop-app-code-deb-pkg/files/usr.sbin.apache2\r\n71: /etc/python3.5/sitecustomize.py r,\r\n109: /usr/local/lib/python3.5/dist-packages/ r,\r\n117: /opt/venvs/securedrop-app-code/lib/python3.5/ r,\r\n118: /opt/venvs/securedrop-app-code/lib/python3.5/** rm,\r\n\r\nsecuredrop/scripts/rqrequeue\r\n9:sys.path.insert(0, \"/opt/venvs/securedrop-app-code/lib/python3.5/site-packages\") # noqa: E402\r\n\r\nsecuredrop/scripts/shredder\r\n14: 0, \"/opt/venvs/securedrop-app-code/lib/python3.5/site-packages\"\r\n\r\nsecuredrop/scripts/source_deleter\r\n14: 0, \"/opt/venvs/securedrop-app-code/lib/python3.5/site-packages\"\r\n$ rg 3\\\\.5 --type=py\r\nmolecule/builder-focal/tests/test_build_dependencies.py\r\n6:SECUREDROP_PYTHON_VERSION = os.environ.get(\"SECUREDROP_PYTHON_VERSION\", \"3.5\")\r\n\r\nsetup.py\r\n14: python_requires=\">=3.5\",\r\n```\n", "before_files": [{"content": "import setuptools\n\nlong_description = \"The SecureDrop whistleblower platform.\"\n\nsetuptools.setup(\n name=\"securedrop-app-code\",\n version=\"2.5.0~rc1\",\n author=\"Freedom of the Press Foundation\",\n author_email=\"[email protected]\",\n description=\"SecureDrop Server\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license=\"AGPLv3+\",\n python_requires=\">=3.5\",\n url=\"https://github.com/freedomofpress/securedrop\",\n classifiers=(\n \"Development Status :: 5 - Stable\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\",\n \"Intended Audience :: Developers\",\n \"Operating System :: OS Independent\",\n ),\n)\n", "path": "setup.py"}]} | 1,275 | 108 |
gh_patches_debug_2361 | rasdani/github-patches | git_diff | tough-dev-school__education-backend-1502 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
In the admin, display the cohort name in every course name
Right now it is completely unclear which cohort a course belongs to, and you are left guessing by which one is newest. The name of the ProductGroup that the course is attached to should be displayed here (see below).
<img width="1511" alt="Screenshot 2022-06-20 at 10 55 18" src="https://user-images.githubusercontent.com/1592663/174552950-bf6ee7e8-6ba7-43f7-af90-5ba2fededfd7.png">
</issue>
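A minimal sketch of what this asks for, assuming a `Course` model with a `group` foreign key to `ProductGroup` (the Django admin change list renders whatever `__str__` returns):

```python
# Sketch: include the parent group's name in the string representation
# so otherwise identical course names are distinguishable in the admin.
from django.db import models


class ProductGroup(models.Model):
    name = models.CharField(max_length=255)


class Course(models.Model):
    name = models.CharField(max_length=255)
    group = models.ForeignKey(ProductGroup, on_delete=models.CASCADE)

    def __str__(self) -> str:
        return f"{self.name} - {self.group.name}"
```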
<code>
[start of src/products/models/course.py]
1 from django.apps import apps
2 from django.core.exceptions import ValidationError
3 from django.db.models import OuterRef
4 from django.db.models import QuerySet
5 from django.db.models import Subquery
6 from django.utils.translation import gettext_lazy as _
7
8 from app.files import RandomFileName
9 from app.models import models
10 from mailing.tasks import send_mail
11 from products.models.base import Shippable
12 from users.models import User
13
14
15 class CourseQuerySet(QuerySet):
16 def for_lms(self) -> QuerySet["Course"]:
17 return self.filter(
18 display_in_lms=True,
19 ).with_course_homepage()
20
21 def with_course_homepage(self) -> QuerySet["Course"]:
22 materials = (
23 apps.get_model("notion.Material")
24 .objects.filter(
25 course=OuterRef("pk"),
26 is_home_page=True,
27 )
28 .order_by(
29 "-created",
30 )
31 .values(
32 "page_id",
33 )
34 )
35
36 return self.annotate(
37 home_page_slug=Subquery(materials[:1]),
38 )
39
40
41 CourseManager = models.Manager.from_queryset(CourseQuerySet)
42
43
44 class Course(Shippable):
45 objects = CourseManager()
46
47 name_genitive = models.CharField(_("Genitive name"), max_length=255, help_text="«мастер-класса о TDD». К примеру для записей.")
48 zoomus_webinar_id = models.CharField(
49 _("Zoom.us webinar ID"), max_length=255, null=True, blank=True, help_text=_("If set, every user who purcashes this course gets invited")
50 )
51
52 welcome_letter_template_id = models.CharField(
53 _("Welcome letter template id"), max_length=255, blank=True, null=True, help_text=_("Will be sent upon purchase if set")
54 )
55 gift_welcome_letter_template_id = models.CharField(
56 _("Special welcome letter template id for gifts"), max_length=255, blank=True, null=True, help_text=_("If not set, common welcome letter will be used")
57 )
58 display_in_lms = models.BooleanField(_("Display in LMS"), default=True, help_text=_("If disabled will not be shown in LMS"))
59
60 diploma_template_context = models.JSONField(default=dict, blank=True)
61
62 disable_triggers = models.BooleanField(_("Disable all triggers"), default=False)
63
64 confirmation_template_id = models.CharField(
65 _("Confirmation template id"),
66 max_length=255,
67 null=True,
68 blank=True,
69 help_text=_("If set user sill receive this message upon creating zero-priced order"),
70 )
71 confirmation_success_url = models.URLField(_("Confirmation success URL"), null=True, blank=True)
72
73 cover = models.ImageField(
74 verbose_name=_("Cover image"),
75 upload_to=RandomFileName("courses/covers"),
76 blank=True,
77 help_text=_("The cover image of course"),
78 )
79
80 class Meta:
81 ordering = ["-id"]
82 verbose_name = _("Course")
83 verbose_name_plural = _("Courses")
84 db_table = "courses_course"
85
86 def clean(self):
87 """Check for correct setting of confirmation_template_id and confirmation_success_url"""
88 if not self.confirmation_template_id and not self.confirmation_success_url:
89 return
90
91 if not all([self.confirmation_template_id, self.confirmation_success_url]):
92 raise ValidationError(_("Both confirmation_template_id and confirmation_success_url must be set"))
93
94 if self.price != 0:
95 raise ValidationError(_("Courses with confirmation should have zero price"))
96
97 def get_purchased_users(self) -> QuerySet[User]:
98 return User.objects.filter(
99 pk__in=apps.get_model("studying.Study").objects.filter(course=self).values_list("student", flat=True),
100 )
101
102 def send_email_to_all_purchased_users(self, template_id: str):
103 for user in self.get_purchased_users().iterator():
104 send_mail.delay(
105 to=user.email,
106 template_id=template_id,
107 )
108
[end of src/products/models/course.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/products/models/course.py b/src/products/models/course.py
--- a/src/products/models/course.py
+++ b/src/products/models/course.py
@@ -105,3 +105,11 @@
to=user.email,
template_id=template_id,
)
+
+ def __str__(self) -> str:
+ name = getattr(self, "name", None)
+ group = getattr(self, "group", None)
+ if name is not None and group is not None:
+ return f"{name} - {group.name}"
+
+ return super().__str__()
| {"golden_diff": "diff --git a/src/products/models/course.py b/src/products/models/course.py\n--- a/src/products/models/course.py\n+++ b/src/products/models/course.py\n@@ -105,3 +105,11 @@\n to=user.email,\n template_id=template_id,\n )\n+\n+ def __str__(self) -> str:\n+ name = getattr(self, \"name\", None)\n+ group = getattr(self, \"group\", None)\n+ if name is not None and group is not None:\n+ return f\"{name} - {group.name}\"\n+\n+ return super().__str__()\n", "issue": "\u0412 \u0430\u0434\u043c\u0438\u043d\u043a\u0435 \u0432\u043e \u0432\u0441\u0435\u0445 \u043d\u0430\u0437\u0432\u0430\u043d\u0438\u044f\u0445 \u043a\u0443\u0440\u0441\u0430 \u0432\u044b\u0432\u043e\u0434\u0438\u0442\u044c \u043d\u0430\u0437\u0432\u0430\u043d\u0438\u0435 \u043f\u043e\u0442\u043e\u043a\u0430\n\u0421\u0435\u0439\u0447\u0430\u0441 \u0441\u043e\u0432\u0435\u0440\u0448\u0435\u043d\u043d\u043e \u043d\u0435\u043f\u043e\u043d\u044f\u0442\u043d\u043e, \u043a \u043a\u0430\u043a\u043e\u043c\u0443 \u043f\u043e\u0442\u043e\u043a\u0443 \u043f\u0440\u0438\u043d\u0430\u0434\u043b\u0435\u0436\u0438\u0442 \u043a\u0443\u0440\u0441 \u2014\u00a0\u043f\u0440\u0438\u0445\u043e\u0434\u0438\u0442\u0441\u044f \u0434\u043e\u0433\u0430\u0434\u044b\u0432\u0430\u0442\u044c\u0441\u044f \u043f\u043e \u0441\u0442\u0430\u0440\u0448\u0438\u043d\u0441\u0442\u0432\u0443. \u041d\u0430\u0434\u043e, \u0447\u0442\u043e\u0431\u044b \u0432\u043e\u0442 \u0442\u0443\u0442 (\u0441\u043c. \u043d\u0438\u0436\u0435) \u0432\u044b\u0432\u043e\u0434\u0438\u043b\u043e\u0441\u044c \u043d\u0430\u0437\u0432\u0430\u043d\u0438\u0435 ProductGroup, \u043a \u043a\u043e\u0442\u043e\u0440\u043e\u043c\u0443 \u043f\u0440\u0438\u0432\u044f\u0437\u0430\u043d \u043a\u0443\u0440\u0441.\r\n\r\n<img width=\"1511\" alt=\"Screenshot 2022-06-20 at 10 55 18\" src=\"https://user-images.githubusercontent.com/1592663/174552950-bf6ee7e8-6ba7-43f7-af90-5ba2fededfd7.png\">\r\n\r\n\n", "before_files": [{"content": "from django.apps import apps\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import OuterRef\nfrom django.db.models import QuerySet\nfrom django.db.models import Subquery\nfrom django.utils.translation import gettext_lazy as _\n\nfrom app.files import RandomFileName\nfrom app.models import models\nfrom mailing.tasks import send_mail\nfrom products.models.base import Shippable\nfrom users.models import User\n\n\nclass CourseQuerySet(QuerySet):\n def for_lms(self) -> QuerySet[\"Course\"]:\n return self.filter(\n display_in_lms=True,\n ).with_course_homepage()\n\n def with_course_homepage(self) -> QuerySet[\"Course\"]:\n materials = (\n apps.get_model(\"notion.Material\")\n .objects.filter(\n course=OuterRef(\"pk\"),\n is_home_page=True,\n )\n .order_by(\n \"-created\",\n )\n .values(\n \"page_id\",\n )\n )\n\n return self.annotate(\n home_page_slug=Subquery(materials[:1]),\n )\n\n\nCourseManager = models.Manager.from_queryset(CourseQuerySet)\n\n\nclass Course(Shippable):\n objects = CourseManager()\n\n name_genitive = models.CharField(_(\"Genitive name\"), max_length=255, help_text=\"\u00ab\u043c\u0430\u0441\u0442\u0435\u0440-\u043a\u043b\u0430\u0441\u0441\u0430 \u043e TDD\u00bb. 
\u041a \u043f\u0440\u0438\u043c\u0435\u0440\u0443 \u0434\u043b\u044f \u0437\u0430\u043f\u0438\u0441\u0435\u0439.\")\n zoomus_webinar_id = models.CharField(\n _(\"Zoom.us webinar ID\"), max_length=255, null=True, blank=True, help_text=_(\"If set, every user who purcashes this course gets invited\")\n )\n\n welcome_letter_template_id = models.CharField(\n _(\"Welcome letter template id\"), max_length=255, blank=True, null=True, help_text=_(\"Will be sent upon purchase if set\")\n )\n gift_welcome_letter_template_id = models.CharField(\n _(\"Special welcome letter template id for gifts\"), max_length=255, blank=True, null=True, help_text=_(\"If not set, common welcome letter will be used\")\n )\n display_in_lms = models.BooleanField(_(\"Display in LMS\"), default=True, help_text=_(\"If disabled will not be shown in LMS\"))\n\n diploma_template_context = models.JSONField(default=dict, blank=True)\n\n disable_triggers = models.BooleanField(_(\"Disable all triggers\"), default=False)\n\n confirmation_template_id = models.CharField(\n _(\"Confirmation template id\"),\n max_length=255,\n null=True,\n blank=True,\n help_text=_(\"If set user sill receive this message upon creating zero-priced order\"),\n )\n confirmation_success_url = models.URLField(_(\"Confirmation success URL\"), null=True, blank=True)\n\n cover = models.ImageField(\n verbose_name=_(\"Cover image\"),\n upload_to=RandomFileName(\"courses/covers\"),\n blank=True,\n help_text=_(\"The cover image of course\"),\n )\n\n class Meta:\n ordering = [\"-id\"]\n verbose_name = _(\"Course\")\n verbose_name_plural = _(\"Courses\")\n db_table = \"courses_course\"\n\n def clean(self):\n \"\"\"Check for correct setting of confirmation_template_id and confirmation_success_url\"\"\"\n if not self.confirmation_template_id and not self.confirmation_success_url:\n return\n\n if not all([self.confirmation_template_id, self.confirmation_success_url]):\n raise ValidationError(_(\"Both confirmation_template_id and confirmation_success_url must be set\"))\n\n if self.price != 0:\n raise ValidationError(_(\"Courses with confirmation should have zero price\"))\n\n def get_purchased_users(self) -> QuerySet[User]:\n return User.objects.filter(\n pk__in=apps.get_model(\"studying.Study\").objects.filter(course=self).values_list(\"student\", flat=True),\n )\n\n def send_email_to_all_purchased_users(self, template_id: str):\n for user in self.get_purchased_users().iterator():\n send_mail.delay(\n to=user.email,\n template_id=template_id,\n )\n", "path": "src/products/models/course.py"}]} | 1,750 | 131 |
gh_patches_debug_28795 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-548 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mercurial project imported from bitbucket stuck in 'Triggered' state
The docs for pylibftdi are set to be built (via a POST trigger) from https://bitbucket.org/codedstructure/pylibftdi, but builds (https://readthedocs.org/builds/pylibftdi/) are stuck at 'Triggered'.
Based on comments in #435 I set the project up to build against a github mirror, and that worked successfully, so it seems (from #435) that this is likely an hg issue.
</issue>
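One plausible failure mode, sketched: `hg tags` output is whitespace-aligned and tag names may themselves contain spaces (for example `maintenance release 1`), which breaks space-delimited parsing and crashes the build before it leaves the Triggered state. Splitting once from the right keeps multi-word names intact:

```python
# Sketch: parse an `hg tags` line without assuming single-token names.
line = "maintenance release 1            10:f83c32fe8126"
name, changeset = line.rsplit(None, 1)  # split on the last whitespace run
revision, commit_hash = changeset.split(":")
print(name, revision, commit_hash)
# maintenance release 1 10 f83c32fe8126
```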
<code>
[start of readthedocs/vcs_support/backends/hg.py]
1 import csv
2 from StringIO import StringIO
3
4 from projects.exceptions import ProjectImportError
5 from vcs_support.base import BaseVCS, VCSVersion
6
7
8 class Backend(BaseVCS):
9 supports_tags = True
10 supports_branches = True
11 fallback_branch = 'default'
12
13 def update(self):
14 super(Backend, self).update()
15 retcode = self.run('hg', 'status')[0]
16 if retcode == 0:
17 return self.pull()
18 else:
19 return self.clone()
20
21 def pull(self):
22 pull_output = self.run('hg', 'pull')
23 if pull_output[0] != 0:
24 raise ProjectImportError(
25 ("Failed to get code from '%s' (hg pull): %s"
26 % (self.repo_url, pull_output[0]))
27 )
28 update_output = self.run('hg', 'update', '-C')[0]
29 if update_output[0] != 0:
30 raise ProjectImportError(
31 ("Failed to get code from '%s' (hg update): %s"
32 % (self.repo_url, pull_output[0]))
33 )
34 return update_output
35
36 def clone(self):
37 output = self.run('hg', 'clone', self.repo_url, '.')
38 if output[0] != 0:
39 raise ProjectImportError(
40 ("Failed to get code from '%s' (hg clone): %s"
41 % (self.repo_url, output[0]))
42 )
43 return output
44
45 @property
46 def branches(self):
47 retcode, stdout = self.run('hg', 'branches', '-q')[:2]
48 # error (or no tags found)
49 if retcode != 0:
50 return []
51 return self.parse_branches(stdout)
52
53 def parse_branches(self, data):
54 """
55 stable
56 default
57 """
58
59 names = [name.lstrip() for name in data.splitlines()]
60 return [VCSVersion(self, name, name) for name in names if name]
61
62 @property
63 def tags(self):
64 retcode, stdout = self.run('hg', 'tags')[:2]
65 # error (or no tags found)
66 if retcode != 0:
67 return []
68 return self.parse_tags(stdout)
69
70 def parse_tags(self, data):
71 """
72 Parses output of show-ref --tags, eg:
73
74 tip 278:c4b2d21db51a
75 0.2.2 152:6b0364d98837
76 0.2.1 117:a14b7b6ffa03
77 0.1 50:30c2c6b3a055
78 """
79 # parse the lines into a list of tuples (commit-hash, tag ref name)
80 raw_tags = csv.reader(StringIO(data), delimiter=' ')
81 vcs_tags = []
82 for row in raw_tags:
83 row = filter(lambda f: f != '', row)
84 if row == []:
85 continue
86 name, commit = row
87 if name == 'tip':
88 continue
89 revision, commit_hash = commit.split(':')
90 vcs_tags.append(VCSVersion(self, commit_hash, name))
91 return vcs_tags
92
93 def checkout(self, identifier=None):
94 super(Backend, self).checkout()
95 if not identifier:
96 identifier = 'tip'
97 retcode = self.run('hg', 'status')[0]
98 if retcode == 0:
99 self.run('hg', 'pull')
100 return self.run('hg', 'update', '-C', identifier)
101 else:
102 self.clone()
103 return self.run('hg', 'update', '-C', identifier)
104
[end of readthedocs/vcs_support/backends/hg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/readthedocs/vcs_support/backends/hg.py b/readthedocs/vcs_support/backends/hg.py
--- a/readthedocs/vcs_support/backends/hg.py
+++ b/readthedocs/vcs_support/backends/hg.py
@@ -1,6 +1,3 @@
-import csv
-from StringIO import StringIO
-
from projects.exceptions import ProjectImportError
from vcs_support.base import BaseVCS, VCSVersion
@@ -69,19 +66,24 @@
def parse_tags(self, data):
"""
- Parses output of show-ref --tags, eg:
+ Parses output of `hg tags`, eg:
+
+ tip 278:c4b2d21db51a
+ 0.2.2 152:6b0364d98837
+ 0.2.1 117:a14b7b6ffa03
+ 0.1 50:30c2c6b3a055
+ maintenance release 1 10:f83c32fe8126
- tip 278:c4b2d21db51a
- 0.2.2 152:6b0364d98837
- 0.2.1 117:a14b7b6ffa03
- 0.1 50:30c2c6b3a055
+ Into VCSVersion objects with the tag name as verbose_name and the
+ commit hash as identifier.
"""
- # parse the lines into a list of tuples (commit-hash, tag ref name)
- raw_tags = csv.reader(StringIO(data), delimiter=' ')
vcs_tags = []
- for row in raw_tags:
- row = filter(lambda f: f != '', row)
- if row == []:
+ tag_lines = [line.strip() for line in data.splitlines()]
+ # starting from the rhs of each line, split a single value (changeset)
+ # off at whitespace; the tag name is the string to the left of that
+ tag_pairs = [line.rsplit(None, 1) for line in tag_lines]
+ for row in tag_pairs:
+ if len(row) != 2:
continue
name, commit = row
if name == 'tip':
| {"golden_diff": "diff --git a/readthedocs/vcs_support/backends/hg.py b/readthedocs/vcs_support/backends/hg.py\n--- a/readthedocs/vcs_support/backends/hg.py\n+++ b/readthedocs/vcs_support/backends/hg.py\n@@ -1,6 +1,3 @@\n-import csv\n-from StringIO import StringIO\n-\n from projects.exceptions import ProjectImportError\n from vcs_support.base import BaseVCS, VCSVersion\n \n@@ -69,19 +66,24 @@\n \n def parse_tags(self, data):\n \"\"\"\n- Parses output of show-ref --tags, eg:\n+ Parses output of `hg tags`, eg:\n+\n+ tip 278:c4b2d21db51a\n+ 0.2.2 152:6b0364d98837\n+ 0.2.1 117:a14b7b6ffa03\n+ 0.1 50:30c2c6b3a055\n+ maintenance release 1 10:f83c32fe8126\n \n- tip 278:c4b2d21db51a\n- 0.2.2 152:6b0364d98837\n- 0.2.1 117:a14b7b6ffa03\n- 0.1 50:30c2c6b3a055\n+ Into VCSVersion objects with the tag name as verbose_name and the\n+ commit hash as identifier.\n \"\"\"\n- # parse the lines into a list of tuples (commit-hash, tag ref name)\n- raw_tags = csv.reader(StringIO(data), delimiter=' ')\n vcs_tags = []\n- for row in raw_tags:\n- row = filter(lambda f: f != '', row)\n- if row == []:\n+ tag_lines = [line.strip() for line in data.splitlines()]\n+ # starting from the rhs of each line, split a single value (changeset)\n+ # off at whitespace; the tag name is the string to the left of that\n+ tag_pairs = [line.rsplit(None, 1) for line in tag_lines]\n+ for row in tag_pairs:\n+ if len(row) != 2:\n continue\n name, commit = row\n if name == 'tip':\n", "issue": "mercurial project imported from bitbucket stuck in 'Triggered' state\nThe docs for pylibftdi are set to be built (via a POST trigger) from https://bitbucket.org/codedstructure/pylibftdi, but builds (https://readthedocs.org/builds/pylibftdi/) are stuck at 'Triggered'.\n\nBased on comments in #435 I set the project up to build against a github mirror, and that worked successfully, so it seems (from #435) that this is likely an hg issue.\n\n", "before_files": [{"content": "import csv\nfrom StringIO import StringIO\n\nfrom projects.exceptions import ProjectImportError\nfrom vcs_support.base import BaseVCS, VCSVersion\n\n\nclass Backend(BaseVCS):\n supports_tags = True\n supports_branches = True\n fallback_branch = 'default'\n\n def update(self):\n super(Backend, self).update()\n retcode = self.run('hg', 'status')[0]\n if retcode == 0:\n return self.pull()\n else:\n return self.clone()\n\n def pull(self):\n pull_output = self.run('hg', 'pull')\n if pull_output[0] != 0:\n raise ProjectImportError(\n (\"Failed to get code from '%s' (hg pull): %s\"\n % (self.repo_url, pull_output[0]))\n )\n update_output = self.run('hg', 'update', '-C')[0]\n if update_output[0] != 0:\n raise ProjectImportError(\n (\"Failed to get code from '%s' (hg update): %s\"\n % (self.repo_url, pull_output[0]))\n )\n return update_output\n\n def clone(self):\n output = self.run('hg', 'clone', self.repo_url, '.')\n if output[0] != 0:\n raise ProjectImportError(\n (\"Failed to get code from '%s' (hg clone): %s\"\n % (self.repo_url, output[0]))\n )\n return output\n\n @property\n def branches(self):\n retcode, stdout = self.run('hg', 'branches', '-q')[:2]\n # error (or no tags found)\n if retcode != 0:\n return []\n return self.parse_branches(stdout)\n\n def parse_branches(self, data):\n \"\"\"\n stable\n default\n \"\"\"\n\n names = [name.lstrip() for name in data.splitlines()]\n return [VCSVersion(self, name, name) for name in names if name]\n\n @property\n def tags(self):\n retcode, stdout = self.run('hg', 'tags')[:2]\n # error (or no tags found)\n if retcode != 0:\n return []\n return 
self.parse_tags(stdout)\n\n def parse_tags(self, data):\n \"\"\"\n Parses output of show-ref --tags, eg:\n\n tip 278:c4b2d21db51a\n 0.2.2 152:6b0364d98837\n 0.2.1 117:a14b7b6ffa03\n 0.1 50:30c2c6b3a055\n \"\"\"\n # parse the lines into a list of tuples (commit-hash, tag ref name)\n raw_tags = csv.reader(StringIO(data), delimiter=' ')\n vcs_tags = []\n for row in raw_tags:\n row = filter(lambda f: f != '', row)\n if row == []:\n continue\n name, commit = row\n if name == 'tip':\n continue\n revision, commit_hash = commit.split(':')\n vcs_tags.append(VCSVersion(self, commit_hash, name))\n return vcs_tags\n\n def checkout(self, identifier=None):\n super(Backend, self).checkout()\n if not identifier:\n identifier = 'tip'\n retcode = self.run('hg', 'status')[0]\n if retcode == 0:\n self.run('hg', 'pull')\n return self.run('hg', 'update', '-C', identifier)\n else:\n self.clone()\n return self.run('hg', 'update', '-C', identifier)\n", "path": "readthedocs/vcs_support/backends/hg.py"}]} | 1,673 | 560 |
gh_patches_debug_3875 | rasdani/github-patches | git_diff | kartoza__prj.app-813 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error 500: Editing Answers.
# Problem
When I select the edit option for the answers on http://changelog.qgis.org/id/inasafe-realtime2/,
I get an HTTP 500 error.
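For context, a minimal sketch of the likely failure mode (the model and value names here are stand-ins): `get_object_or_404` forwards positional arguments straight to `QuerySet.get()`, which only accepts `Q` objects positionally, so passing a bare string such as `'pk'` raises an exception that Django surfaces as a 500.

```python
# Sketch only: `Answer` stands in for the real model.
from django.db.models import Q
from django.shortcuts import get_object_or_404

def load_answer(Answer, pk_value):
    # Failing pattern (string passed positionally):
    #     get_object_or_404(Answer, 'pk')        # blows up inside .get()
    # Either of these works instead:
    obj = get_object_or_404(Answer, pk=pk_value)     # keyword lookup
    obj = get_object_or_404(Answer, Q(pk=pk_value))  # positional Q object
    return obj
```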
</issue>
<code>
[start of django_project/lesson/views/answer.py]
1 # coding=utf-8
2 """Answer views."""
3
4 from django.core.urlresolvers import reverse
5 from django.views.generic import (
6 CreateView,
7 DeleteView,
8 UpdateView,
9 )
10 from django.shortcuts import get_object_or_404
11 from django.utils.translation import ugettext_lazy as _
12
13 from braces.views import LoginRequiredMixin
14
15 from lesson.forms.answer import AnswerForm
16 from lesson.models.answer import Answer
17 from lesson.models.worksheet_question import WorksheetQuestion
18
19
20 class AnswerMixin(object):
21 """Mixin class to provide standard settings for Answer."""
22
23 model = Answer
24 form_class = AnswerForm
25
26
27 class AnswerCreateView(
28 LoginRequiredMixin, AnswerMixin, CreateView):
29 """Create view for Answer."""
30
31 context_object_name = 'answer'
32 template_name = 'create.html'
33 creation_label = _('Add answer')
34
35 def get_success_url(self):
36 """Define the redirect URL
37
38 After successful creation of the object, the User will be redirected
39 to the unapproved Version list page for the object's parent Worksheet
40
41 :returns: URL
42 :rtype: HttpResponse
43 """
44 return reverse('worksheet-detail', kwargs={
45 'pk': self.object.question.worksheet.pk,
46 'section_slug': self.object.question.worksheet.section.slug,
47 'project_slug': self.object.question.worksheet.section.project.slug
48 })
49
50 def get_form_kwargs(self):
51 """Get keyword arguments from form.
52
53 :returns keyword argument from the form
54 :rtype dict
55 """
56 kwargs = super(AnswerCreateView, self).get_form_kwargs()
57 pk = self.kwargs['question_pk']
58 kwargs['question'] = get_object_or_404(WorksheetQuestion, pk=pk)
59 return kwargs
60
61
62 # noinspection PyAttributeOutsideInit
63 class AnswerDeleteView(
64 LoginRequiredMixin,
65 AnswerMixin,
66 DeleteView):
67 """Delete view for Answer."""
68
69 context_object_name = 'answer'
70 template_name = 'answer/delete.html'
71
72 def get_success_url(self):
73 """Define the redirect URL.
74
75 After successful deletion of the object, the User will be redirected
76 to the Certifying Organisation list page
77 for the object's parent Worksheet.
78
79 :returns: URL
80 :rtype: HttpResponse
81 """
82 return reverse('worksheet-detail', kwargs={
83 'pk': self.object.question.worksheet.pk,
84 'section_slug': self.object.question.worksheet.section.slug,
85 'project_slug': self.object.question.worksheet.section.project.slug
86 })
87
88
89 # noinspection PyAttributeOutsideInit
90 class AnswerUpdateView(
91 LoginRequiredMixin,
92 AnswerMixin,
93 UpdateView):
94 """Update view for Answer."""
95
96 context_object_name = 'answer'
97 template_name = 'update.html'
98 update_label = _('Update answer')
99
100 def get_form_kwargs(self):
101 """Get keyword arguments from form.
102
103 :returns keyword argument from the form
104 :rtype: dict
105 """
106 kwargs = super(AnswerUpdateView, self).get_form_kwargs()
107 answer = get_object_or_404(Answer, self.pk_url_kwarg)
108 kwargs['question'] = answer.question
109 return kwargs
110
111 def get_success_url(self):
112 """Define the redirect URL.
113
114 After successful update of the object, the User will be redirected to
115 the specification list page for the object's parent Worksheet.
116
117 :returns: URL
118 :rtype: HttpResponse
119 """
120 return reverse('worksheet-detail', kwargs={
121 'pk': self.object.question.worksheet.pk,
122 'section_slug': self.object.question.worksheet.section.slug,
123 'project_slug': self.object.question.worksheet.section.project.slug
124 })
125
[end of django_project/lesson/views/answer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django_project/lesson/views/answer.py b/django_project/lesson/views/answer.py
--- a/django_project/lesson/views/answer.py
+++ b/django_project/lesson/views/answer.py
@@ -104,7 +104,7 @@
:rtype: dict
"""
kwargs = super(AnswerUpdateView, self).get_form_kwargs()
- answer = get_object_or_404(Answer, self.pk_url_kwarg)
+ answer = get_object_or_404(Answer, pk=kwargs['instance'].pk)
kwargs['question'] = answer.question
return kwargs
| {"golden_diff": "diff --git a/django_project/lesson/views/answer.py b/django_project/lesson/views/answer.py\n--- a/django_project/lesson/views/answer.py\n+++ b/django_project/lesson/views/answer.py\n@@ -104,7 +104,7 @@\n :rtype: dict\n \"\"\"\n kwargs = super(AnswerUpdateView, self).get_form_kwargs()\n- answer = get_object_or_404(Answer, self.pk_url_kwarg)\n+ answer = get_object_or_404(Answer, pk=kwargs['instance'].pk)\n kwargs['question'] = answer.question\n return kwargs\n", "issue": "Error 500: Editing Answers.\n# Problem\r\n\r\nWhen I select the edit option for the answers on http://changelog.qgis.org/id/inasafe-realtime2/\r\nThen I get error 500.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Answer views.\"\"\"\n\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic import (\n CreateView,\n DeleteView,\n UpdateView,\n)\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom braces.views import LoginRequiredMixin\n\nfrom lesson.forms.answer import AnswerForm\nfrom lesson.models.answer import Answer\nfrom lesson.models.worksheet_question import WorksheetQuestion\n\n\nclass AnswerMixin(object):\n \"\"\"Mixin class to provide standard settings for Answer.\"\"\"\n\n model = Answer\n form_class = AnswerForm\n\n\nclass AnswerCreateView(\n LoginRequiredMixin, AnswerMixin, CreateView):\n \"\"\"Create view for Answer.\"\"\"\n\n context_object_name = 'answer'\n template_name = 'create.html'\n creation_label = _('Add answer')\n\n def get_success_url(self):\n \"\"\"Define the redirect URL\n\n After successful creation of the object, the User will be redirected\n to the unapproved Version list page for the object's parent Worksheet\n\n :returns: URL\n :rtype: HttpResponse\n \"\"\"\n return reverse('worksheet-detail', kwargs={\n 'pk': self.object.question.worksheet.pk,\n 'section_slug': self.object.question.worksheet.section.slug,\n 'project_slug': self.object.question.worksheet.section.project.slug\n })\n\n def get_form_kwargs(self):\n \"\"\"Get keyword arguments from form.\n\n :returns keyword argument from the form\n :rtype dict\n \"\"\"\n kwargs = super(AnswerCreateView, self).get_form_kwargs()\n pk = self.kwargs['question_pk']\n kwargs['question'] = get_object_or_404(WorksheetQuestion, pk=pk)\n return kwargs\n\n\n# noinspection PyAttributeOutsideInit\nclass AnswerDeleteView(\n LoginRequiredMixin,\n AnswerMixin,\n DeleteView):\n \"\"\"Delete view for Answer.\"\"\"\n\n context_object_name = 'answer'\n template_name = 'answer/delete.html'\n\n def get_success_url(self):\n \"\"\"Define the redirect URL.\n\n After successful deletion of the object, the User will be redirected\n to the Certifying Organisation list page\n for the object's parent Worksheet.\n\n :returns: URL\n :rtype: HttpResponse\n \"\"\"\n return reverse('worksheet-detail', kwargs={\n 'pk': self.object.question.worksheet.pk,\n 'section_slug': self.object.question.worksheet.section.slug,\n 'project_slug': self.object.question.worksheet.section.project.slug\n })\n\n\n# noinspection PyAttributeOutsideInit\nclass AnswerUpdateView(\n LoginRequiredMixin,\n AnswerMixin,\n UpdateView):\n \"\"\"Update view for Answer.\"\"\"\n\n context_object_name = 'answer'\n template_name = 'update.html'\n update_label = _('Update answer')\n\n def get_form_kwargs(self):\n \"\"\"Get keyword arguments from form.\n\n :returns keyword argument from the form\n :rtype: dict\n \"\"\"\n kwargs = super(AnswerUpdateView, self).get_form_kwargs()\n answer = get_object_or_404(Answer, 
self.pk_url_kwarg)\n kwargs['question'] = answer.question\n return kwargs\n\n def get_success_url(self):\n \"\"\"Define the redirect URL.\n\n After successful update of the object, the User will be redirected to\n the specification list page for the object's parent Worksheet.\n\n :returns: URL\n :rtype: HttpResponse\n \"\"\"\n return reverse('worksheet-detail', kwargs={\n 'pk': self.object.question.worksheet.pk,\n 'section_slug': self.object.question.worksheet.section.slug,\n 'project_slug': self.object.question.worksheet.section.project.slug\n })\n", "path": "django_project/lesson/views/answer.py"}]} | 1,631 | 141 |
gh_patches_debug_22768 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1384 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[PS-1] Add new RPC service definitions in elasticdl.proto according to the PS design
[PS design](https://github.com/sql-machine-learning/elasticdl/blob/develop/docs/designs/ps_design.md#rpc-definition) adds some new RPC services.
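For reference, a hedged sketch of what the typed stubs could return once the new messages exist (the message names below follow the design document's naming and are assumptions until the proto is regenerated):

```python
# Sketch only: assumes elasticdl_pb2 will define PullVariableResponse,
# Tensor, and PushGradientResponse as described in the PS design.
from elasticdl.proto import elasticdl_pb2

def pull_variable_stub(request, _):
    return elasticdl_pb2.PullVariableResponse()

def pull_embedding_vector_stub(request, _):
    return elasticdl_pb2.Tensor()

def push_gradient_stub(request, _):
    return elasticdl_pb2.PushGradientResponse()
```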
</issue>
<code>
[start of elasticdl/python/ps/servicer.py]
1 from google.protobuf import empty_pb2
2
3 from elasticdl.proto import elasticdl_pb2_grpc
4
5
6 class PserverServicer(elasticdl_pb2_grpc.PserverServicer):
7 """PS service implementation"""
8
9 def __init__(
10 self,
11 parameters,
12 grads_to_wait,
13 optimizer,
14 lr_staleness_modulation=False,
15 use_async=False,
16 ):
17 self._parameters = parameters
18 self._grads_to_wait = grads_to_wait
19 self._optimizer = optimizer
20 self._lr_staleness_modulation = lr_staleness_modulation
21 self._use_async = use_async
22 self._version = 0
23
24 def pull_variable(self, request, _):
25 # TODO: implement this RPC service
26 return empty_pb2.Empty()
27
28 def pull_embedding_vector(self, request, _):
29 # TODO: implement this RPC service
30 return empty_pb2.Empty()
31
32 def push_model(self, request, _):
33 # TODO: implement this RPC service
34 return empty_pb2.Empty()
35
36 def push_gradient(self, request, _):
37 # TODO: implement this RPC service
38 return empty_pb2.Empty()
39
[end of elasticdl/python/ps/servicer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py
--- a/elasticdl/python/ps/servicer.py
+++ b/elasticdl/python/ps/servicer.py
@@ -1,6 +1,6 @@
from google.protobuf import empty_pb2
-from elasticdl.proto import elasticdl_pb2_grpc
+from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc
class PserverServicer(elasticdl_pb2_grpc.PserverServicer):
@@ -23,11 +23,11 @@
def pull_variable(self, request, _):
# TODO: implement this RPC service
- return empty_pb2.Empty()
+ return elasticdl_pb2.PullVariableResponse()
def pull_embedding_vector(self, request, _):
# TODO: implement this RPC service
- return empty_pb2.Empty()
+ return elasticdl_pb2.Tensor()
def push_model(self, request, _):
# TODO: implement this RPC service
@@ -35,4 +35,4 @@
def push_gradient(self, request, _):
# TODO: implement this RPC service
- return empty_pb2.Empty()
+ return elasticdl_pb2.PushGradientResponse()
| {"golden_diff": "diff --git a/elasticdl/python/ps/servicer.py b/elasticdl/python/ps/servicer.py\n--- a/elasticdl/python/ps/servicer.py\n+++ b/elasticdl/python/ps/servicer.py\n@@ -1,6 +1,6 @@\n from google.protobuf import empty_pb2\n \n-from elasticdl.proto import elasticdl_pb2_grpc\n+from elasticdl.proto import elasticdl_pb2, elasticdl_pb2_grpc\n \n \n class PserverServicer(elasticdl_pb2_grpc.PserverServicer):\n@@ -23,11 +23,11 @@\n \n def pull_variable(self, request, _):\n # TODO: implement this RPC service\n- return empty_pb2.Empty()\n+ return elasticdl_pb2.PullVariableResponse()\n \n def pull_embedding_vector(self, request, _):\n # TODO: implement this RPC service\n- return empty_pb2.Empty()\n+ return elasticdl_pb2.Tensor()\n \n def push_model(self, request, _):\n # TODO: implement this RPC service\n@@ -35,4 +35,4 @@\n \n def push_gradient(self, request, _):\n # TODO: implement this RPC service\n- return empty_pb2.Empty()\n+ return elasticdl_pb2.PushGradientResponse()\n", "issue": "[PS-1]Add new RPC services definition in elasticdl.proto according to PS design\n[PS design](https://github.com/sql-machine-learning/elasticdl/blob/develop/docs/designs/ps_design.md#rpc-definition) adds some new RPC services.\n", "before_files": [{"content": "from google.protobuf import empty_pb2\n\nfrom elasticdl.proto import elasticdl_pb2_grpc\n\n\nclass PserverServicer(elasticdl_pb2_grpc.PserverServicer):\n \"\"\"PS service implementation\"\"\"\n\n def __init__(\n self,\n parameters,\n grads_to_wait,\n optimizer,\n lr_staleness_modulation=False,\n use_async=False,\n ):\n self._parameters = parameters\n self._grads_to_wait = grads_to_wait\n self._optimizer = optimizer\n self._lr_staleness_modulation = lr_staleness_modulation\n self._use_async = use_async\n self._version = 0\n\n def pull_variable(self, request, _):\n # TODO: implement this RPC service\n return empty_pb2.Empty()\n\n def pull_embedding_vector(self, request, _):\n # TODO: implement this RPC service\n return empty_pb2.Empty()\n\n def push_model(self, request, _):\n # TODO: implement this RPC service\n return empty_pb2.Empty()\n\n def push_gradient(self, request, _):\n # TODO: implement this RPC service\n return empty_pb2.Empty()\n", "path": "elasticdl/python/ps/servicer.py"}]} | 917 | 281 |
gh_patches_debug_12962 | rasdani/github-patches | git_diff | mkdocs__mkdocs-615 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Only creating wheels for Python 2.7
It seems I didn't set something up correctly. It looks like this is a limitation of `setup.py bdist_wheel`.
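For a pure-Python project like this, the usual answer is to mark the wheel as universal, but that is only safe once the Python-2.6 branch in `install_requires` moves into an environment marker (a condition evaluated at build time gets baked into a universal wheel). A hedged sketch, not the project's actual configuration:

```python
# Sketch only; names and versions are placeholders.
from setuptools import setup

setup(
    name="example-package",
    version="0.0.0",
    install_requires=[
        # environment markers replace the PY26 check done at build time
        'Markdown>=2.3.1,<2.5; python_version == "2.6"',
        'Markdown>=2.3.1; python_version != "2.6"',
    ],
    # equivalent to `[bdist_wheel] universal = 1` in setup.cfg
    options={"bdist_wheel": {"universal": True}},
)
```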
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 from __future__ import print_function
5 from setuptools import setup
6 import re
7 import os
8 import sys
9
10 PY26 = sys.version_info[:2] == (2, 6)
11
12
13 long_description = (
14 "MkDocs is a fast, simple and downright gorgeous static site generator "
15 "that's geared towards building project documentation. Documentation "
16 "source files are written in Markdown, and configured with a single YAML "
17 "configuration file."
18 )
19
20
21 def get_version(package):
22 """Return package version as listed in `__version__` in `init.py`."""
23 init_py = open(os.path.join(package, '__init__.py')).read()
24 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
25
26
27 def get_packages(package):
28 """Return root package and all sub-packages."""
29 return [dirpath
30 for dirpath, dirnames, filenames in os.walk(package)
31 if os.path.exists(os.path.join(dirpath, '__init__.py'))]
32
33
34 def get_package_data(package):
35 """
36 Return all files under the root package, that are not in a
37 package themselves.
38 """
39 walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
40 for dirpath, dirnames, filenames in os.walk(package)
41 if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
42
43 filepaths = []
44 for base, filenames in walk:
45 filepaths.extend([os.path.join(base, filename)
46 for filename in filenames])
47 return {package: filepaths}
48
49 setup(
50 name="mkdocs",
51 version=get_version("mkdocs"),
52 url='http://www.mkdocs.org',
53 license='BSD',
54 description='Project documentation with Markdown.',
55 long_description=long_description,
56 author='Tom Christie',
57 author_email='[email protected]', # SEE NOTE BELOW (*)
58 packages=get_packages("mkdocs"),
59 package_data=get_package_data("mkdocs"),
60 install_requires=[
61 'click>=4.0',
62 'Jinja2>=2.7.1',
63 'livereload>=2.3.2',
64 'Markdown>=2.3.1,<2.5' if PY26 else 'Markdown>=2.3.1',
65 'PyYAML>=3.10',
66 'tornado>=4.1',
67 ],
68 entry_points={
69 'console_scripts': [
70 'mkdocs = mkdocs.cli:cli',
71 ],
72 },
73 classifiers=[
74 'Development Status :: 5 - Production/Stable',
75 'Environment :: Console',
76 'Environment :: Web Environment',
77 'Intended Audience :: Developers',
78 'License :: OSI Approved :: BSD License',
79 'Operating System :: OS Independent',
80 'Programming Language :: Python',
81 'Programming Language :: Python :: 2',
82 'Programming Language :: Python :: 2.6',
83 'Programming Language :: Python :: 2.7',
84 'Programming Language :: Python :: 3',
85 'Programming Language :: Python :: 3.3',
86 'Programming Language :: Python :: 3.4',
87 "Programming Language :: Python :: Implementation :: CPython",
88 'Topic :: Documentation',
89 'Topic :: Text Processing',
90 ],
91 zip_safe=False
92 )
93
94 # (*) Please direct queries to the discussion group:
95 # https://groups.google.com/forum/#!forum/mkdocs
96
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -46,6 +46,22 @@
for filename in filenames])
return {package: filepaths}
+
+if sys.argv[-1] == 'publish':
+ if os.system("pip freeze | grep wheel"):
+ print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
+ sys.exit()
+ if os.system("pip freeze | grep twine"):
+ print("twine not installed.\nUse `pip install twine`.\nExiting.")
+ sys.exit()
+ os.system("python setup.py sdist bdist_wheel")
+ os.system("twine upload dist/*")
+ print("You probably want to also tag the version now:")
+ print(" git tag -a {0} -m 'version {0}'".format(get_version("mkdocs")))
+ print(" git push --tags")
+ sys.exit()
+
+
setup(
name="mkdocs",
version=get_version("mkdocs"),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,6 +46,22 @@\n for filename in filenames])\n return {package: filepaths}\n \n+\n+if sys.argv[-1] == 'publish':\n+ if os.system(\"pip freeze | grep wheel\"):\n+ print(\"wheel not installed.\\nUse `pip install wheel`.\\nExiting.\")\n+ sys.exit()\n+ if os.system(\"pip freeze | grep twine\"):\n+ print(\"twine not installed.\\nUse `pip install twine`.\\nExiting.\")\n+ sys.exit()\n+ os.system(\"python setup.py sdist bdist_wheel\")\n+ os.system(\"twine upload dist/*\")\n+ print(\"You probably want to also tag the version now:\")\n+ print(\" git tag -a {0} -m 'version {0}'\".format(get_version(\"mkdocs\")))\n+ print(\" git push --tags\")\n+ sys.exit()\n+\n+\n setup(\n name=\"mkdocs\",\n version=get_version(\"mkdocs\"),\n", "issue": "Only creating wheels for Python 2.7\nSeems I didn't set something up correctly. It looks like this is a limitation of `setup.py bdist_wheel`\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\nPY26 = sys.version_info[:2] == (2, 6)\n\n\nlong_description = (\n \"MkDocs is a fast, simple and downright gorgeous static site generator \"\n \"that's geared towards building project documentation. Documentation \"\n \"source files are written in Markdown, and configured with a single YAML \"\n \"configuration file.\"\n)\n\n\ndef get_version(package):\n \"\"\"Return package version as listed in `__version__` in `init.py`.\"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"Return root package and all sub-packages.\"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\ndef get_package_data(package):\n \"\"\"\n Return all files under the root package, that are not in a\n package themselves.\n \"\"\"\n walk = [(dirpath.replace(package + os.sep, '', 1), filenames)\n for dirpath, dirnames, filenames in os.walk(package)\n if not os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return {package: filepaths}\n\nsetup(\n name=\"mkdocs\",\n version=get_version(\"mkdocs\"),\n url='http://www.mkdocs.org',\n license='BSD',\n description='Project documentation with Markdown.',\n long_description=long_description,\n author='Tom Christie',\n author_email='[email protected]', # SEE NOTE BELOW (*)\n packages=get_packages(\"mkdocs\"),\n package_data=get_package_data(\"mkdocs\"),\n install_requires=[\n 'click>=4.0',\n 'Jinja2>=2.7.1',\n 'livereload>=2.3.2',\n 'Markdown>=2.3.1,<2.5' if PY26 else 'Markdown>=2.3.1',\n 'PyYAML>=3.10',\n 'tornado>=4.1',\n ],\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.cli:cli',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 
'Programming Language :: Python :: 3.4',\n \"Programming Language :: Python :: Implementation :: CPython\",\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ],\n zip_safe=False\n)\n\n# (*) Please direct queries to the discussion group:\n# https://groups.google.com/forum/#!forum/mkdocs\n", "path": "setup.py"}]} | 1,492 | 235 |
gh_patches_debug_26209 | rasdani/github-patches | git_diff | Cog-Creators__Red-DiscordBot-3166 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[p]announce fails if the bot belongs to a team
# Command bugs
#### Command name
`announce`
#### What cog is this command from?
`Admin`
#### What were you expecting to happen?
Send the announcement to all enabled servers; if it fails, send a message to one of the owners or to all owners (like `[p]contact` does)
#### What actually happened?
The announcement failed almost immediately, with an error in the console.
#### How can we reproduce this issue?
1. Set up the bot with a token belonging to a team
2. Create an environment where the bot can't send an announcement to a server
3. Announce a message
4. `[p]announce` silently fails with this error:
```py
Traceback (most recent call last):
File "/home/fixator/Red-V3/lib/python3.7/site-packages/redbot/cogs/admin/announcer.py", line 67, in announcer
await channel.send(self.message)
File "/home/fixator/Red-V3/lib/python3.7/site-packages/discord/abc.py", line 823, in send
data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce)
File "/home/fixator/Red-V3/lib/python3.7/site-packages/discord/http.py", line 218, in request
raise Forbidden(r, data)
discord.errors.Forbidden: 403 FORBIDDEN (error code: 50001): Missing Access
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/fixator/Red-V3/lib/python3.7/site-packages/redbot/cogs/admin/announcer.py", line 70, in announcer
_("I could not announce to server: {server.id}").format(server=g)
File "/home/fixator/Red-V3/lib/python3.7/site-packages/discord/abc.py", line 823, in send
data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce)
File "/home/fixator/Red-V3/lib/python3.7/site-packages/discord/http.py", line 218, in request
raise Forbidden(r, data)
discord.errors.Forbidden: 403 FORBIDDEN (error code: 50007): Cannot send messages to this user
```
Caused by https://github.com/Cog-Creators/Red-DiscordBot/blob/f0836d7182d99239d1fde24cf2231c6ebf206f72/redbot/cogs/admin/announcer.py#L56
*Kinda related to #2781, I guess.*
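Since `application_info().owner` can be a `discord.Team`, whose team pseudo-user cannot be DMed, a sketch of the safer shape is to collect failures and hand them to the bot's owner-notification helper (Red 3.x exposes `send_to_owners`; the other names here are stand-ins):

```python
import asyncio

import discord

async def announce_all(bot, guilds, message: str) -> None:
    """Sketch: report failures once, via every configured owner."""
    failed = []
    for guild in guilds:
        channel = guild.system_channel or guild.text_channels[0]
        try:
            await channel.send(message)
        except discord.Forbidden:
            failed.append(str(guild.id))
        await asyncio.sleep(0.5)
    if failed:
        # Fans out to all owners, so team-owned bots are handled too.
        await bot.send_to_owners(
            "I could not announce to the following servers: " + ", ".join(failed)
        )
```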
</issue>
<code>
[start of redbot/cogs/admin/announcer.py]
1 import asyncio
2
3 import discord
4 from redbot.core import commands
5 from redbot.core.i18n import Translator
6
7 _ = Translator("Announcer", __file__)
8
9
10 class Announcer:
11 def __init__(self, ctx: commands.Context, message: str, config=None):
12 """
13 :param ctx:
14 :param message:
15 :param config: Used to determine channel overrides
16 """
17 self.ctx = ctx
18 self.message = message
19 self.config = config
20
21 self.active = None
22
23 def start(self):
24 """
25 Starts an announcement.
26 :return:
27 """
28 if self.active is None:
29 self.active = True
30 self.ctx.bot.loop.create_task(self.announcer())
31
32 def cancel(self):
33 """
34 Cancels a running announcement.
35 :return:
36 """
37 self.active = False
38
39 async def _get_announce_channel(self, guild: discord.Guild) -> discord.TextChannel:
40 channel_id = await self.config.guild(guild).announce_channel()
41 channel = None
42
43 if channel_id is not None:
44 channel = guild.get_channel(channel_id)
45
46 if channel is None:
47 channel = guild.system_channel
48
49 if channel is None:
50 channel = guild.text_channels[0]
51
52 return channel
53
54 async def announcer(self):
55 guild_list = self.ctx.bot.guilds
56 bot_owner = (await self.ctx.bot.application_info()).owner
57 for g in guild_list:
58 if not self.active:
59 return
60
61 if await self.config.guild(g).announce_ignore():
62 continue
63
64 channel = await self._get_announce_channel(g)
65
66 try:
67 await channel.send(self.message)
68 except discord.Forbidden:
69 await bot_owner.send(
70 _("I could not announce to server: {server.id}").format(server=g)
71 )
72 await asyncio.sleep(0.5)
73
74 self.active = False
75
[end of redbot/cogs/admin/announcer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/redbot/cogs/admin/announcer.py b/redbot/cogs/admin/announcer.py
--- a/redbot/cogs/admin/announcer.py
+++ b/redbot/cogs/admin/announcer.py
@@ -3,6 +3,7 @@
import discord
from redbot.core import commands
from redbot.core.i18n import Translator
+from redbot.core.utils.chat_formatting import humanize_list, inline
_ = Translator("Announcer", __file__)
@@ -53,7 +54,7 @@
async def announcer(self):
guild_list = self.ctx.bot.guilds
- bot_owner = (await self.ctx.bot.application_info()).owner
+ failed = []
for g in guild_list:
if not self.active:
return
@@ -66,9 +67,14 @@
try:
await channel.send(self.message)
except discord.Forbidden:
- await bot_owner.send(
- _("I could not announce to server: {server.id}").format(server=g)
- )
+ failed.append(str(g.id))
await asyncio.sleep(0.5)
+ msg = (
+ _("I could not announce to the following server: ")
+ if len(failed) == 1
+ else _("I could not announce to the following servers: ")
+ )
+ msg += humanize_list(tuple(map(inline, failed)))
+ await self.ctx.bot.send_to_owners(msg)
self.active = False
| {"golden_diff": "diff --git a/redbot/cogs/admin/announcer.py b/redbot/cogs/admin/announcer.py\n--- a/redbot/cogs/admin/announcer.py\n+++ b/redbot/cogs/admin/announcer.py\n@@ -3,6 +3,7 @@\n import discord\n from redbot.core import commands\n from redbot.core.i18n import Translator\n+from redbot.core.utils.chat_formatting import humanize_list, inline\n \n _ = Translator(\"Announcer\", __file__)\n \n@@ -53,7 +54,7 @@\n \n async def announcer(self):\n guild_list = self.ctx.bot.guilds\n- bot_owner = (await self.ctx.bot.application_info()).owner\n+ failed = []\n for g in guild_list:\n if not self.active:\n return\n@@ -66,9 +67,14 @@\n try:\n await channel.send(self.message)\n except discord.Forbidden:\n- await bot_owner.send(\n- _(\"I could not announce to server: {server.id}\").format(server=g)\n- )\n+ failed.append(str(g.id))\n await asyncio.sleep(0.5)\n \n+ msg = (\n+ _(\"I could not announce to the following server: \")\n+ if len(failed) == 1\n+ else _(\"I could not announce to the following servers: \")\n+ )\n+ msg += humanize_list(tuple(map(inline, failed)))\n+ await self.ctx.bot.send_to_owners(msg)\n self.active = False\n", "issue": "[p]announce fails if bot belongs to team\n# Command bugs\r\n\r\n#### Command name\r\n\r\n`announce`\r\n\r\n#### What cog is this command from?\r\n\r\n`Admin`\r\n\r\n#### What were you expecting to happen?\r\n\r\nSend announcement to all enabled servers, if failed, send message to the one of owners or all owners (like an `[p]contact`)\r\n\r\n#### What actually happened?\r\n\r\nannouncement failed almost immediately with error in console \r\n\r\n#### How can we reproduce this issue?\r\n\r\n1. Set bot with token belonging to team\r\n2. Create environment, where bot cant send announcement to server\r\n3. Announce an message\r\n4. 
`[p]announce` silently fails with error:\r\n```py\r\nTraceback (most recent call last):\r\n File \"/home/fixator/Red-V3/lib/python3.7/site-packages/redbot/cogs/admin/announcer.py\", line 67, in announcer\r\n await channel.send(self.message)\r\n File \"/home/fixator/Red-V3/lib/python3.7/site-packages/discord/abc.py\", line 823, in send\r\n data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce)\r\n File \"/home/fixator/Red-V3/lib/python3.7/site-packages/discord/http.py\", line 218, in request\r\n raise Forbidden(r, data)\r\ndiscord.errors.Forbidden: 403 FORBIDDEN (error code: 50001): Missing Access\r\nDuring handling of the above exception, another exception occurred:\r\nTraceback (most recent call last):\r\n File \"/home/fixator/Red-V3/lib/python3.7/site-packages/redbot/cogs/admin/announcer.py\", line 70, in announcer\r\n _(\"I could not announce to server: {server.id}\").format(server=g)\r\n File \"/home/fixator/Red-V3/lib/python3.7/site-packages/discord/abc.py\", line 823, in send\r\n data = await state.http.send_message(channel.id, content, tts=tts, embed=embed, nonce=nonce)\r\n File \"/home/fixator/Red-V3/lib/python3.7/site-packages/discord/http.py\", line 218, in request\r\n raise Forbidden(r, data)\r\ndiscord.errors.Forbidden: 403 FORBIDDEN (error code: 50007): Cannot send messages to this user\r\n```\r\n\r\nCaused by https://github.com/Cog-Creators/Red-DiscordBot/blob/f0836d7182d99239d1fde24cf2231c6ebf206f72/redbot/cogs/admin/announcer.py#L56\r\n\r\n*Kinda related to #2781, i guess*\n", "before_files": [{"content": "import asyncio\n\nimport discord\nfrom redbot.core import commands\nfrom redbot.core.i18n import Translator\n\n_ = Translator(\"Announcer\", __file__)\n\n\nclass Announcer:\n def __init__(self, ctx: commands.Context, message: str, config=None):\n \"\"\"\n :param ctx:\n :param message:\n :param config: Used to determine channel overrides\n \"\"\"\n self.ctx = ctx\n self.message = message\n self.config = config\n\n self.active = None\n\n def start(self):\n \"\"\"\n Starts an announcement.\n :return:\n \"\"\"\n if self.active is None:\n self.active = True\n self.ctx.bot.loop.create_task(self.announcer())\n\n def cancel(self):\n \"\"\"\n Cancels a running announcement.\n :return:\n \"\"\"\n self.active = False\n\n async def _get_announce_channel(self, guild: discord.Guild) -> discord.TextChannel:\n channel_id = await self.config.guild(guild).announce_channel()\n channel = None\n\n if channel_id is not None:\n channel = guild.get_channel(channel_id)\n\n if channel is None:\n channel = guild.system_channel\n\n if channel is None:\n channel = guild.text_channels[0]\n\n return channel\n\n async def announcer(self):\n guild_list = self.ctx.bot.guilds\n bot_owner = (await self.ctx.bot.application_info()).owner\n for g in guild_list:\n if not self.active:\n return\n\n if await self.config.guild(g).announce_ignore():\n continue\n\n channel = await self._get_announce_channel(g)\n\n try:\n await channel.send(self.message)\n except discord.Forbidden:\n await bot_owner.send(\n _(\"I could not announce to server: {server.id}\").format(server=g)\n )\n await asyncio.sleep(0.5)\n\n self.active = False\n", "path": "redbot/cogs/admin/announcer.py"}]} | 1,703 | 330 |
gh_patches_debug_41 | rasdani/github-patches | git_diff | streamlit__streamlit-3038 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dark theme does not properly adjust markdown tables
### Summary
When I load the latest Streamlit in dark mode, I cannot see anything in my markdown tables because the text color is changed but the background color is not.
### Steps to reproduce
Code snippet:
```
md = """
| Label | Info |
| -------- | --------- |
| Row | Data |
"""
st.markdown(md)
```
**Expected behavior:**
I would expect that if the text color gets changed to white in the table, the background color should get changed to something dark.
**Actual behavior:**
Both the text color and background are white so nothing can be seen.
### Is this a regression?
No, it is a consequence of the new theme.
### Debug info
- Streamlit version: 0.79.0
- Python version: 3.7.9
- pip
- OS version: MacOS Catalina 10.15.7
- Browser version: Chrome 89.0.4389.90
### Additional information
I'm not sure why markdown tables have a different background style, but they seem to; perhaps other UI elements would be affected as well.
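Until the theme covers tables, one stopgap from the app side (a workaround sketch, not an official fix, and the CSS selector is an assumption that may vary across versions) is to make tables inherit the page background:

```python
import streamlit as st

# Workaround sketch: let the dark page background show through tables so
# the light text stays readable.
st.markdown(
    """
    <style>
    table { background-color: transparent !important; }
    </style>
    """,
    unsafe_allow_html=True,
)

st.markdown(
    """
| Label | Info |
| ----- | ---- |
| Row   | Data |
"""
)
```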
</issue>
<code>
[start of e2e/scripts/st_markdown.py]
1 # Copyright 2018-2021 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16
17 st.markdown("This **markdown** is awesome! :sunglasses:")
18
19 st.markdown("This <b>HTML tag</b> is escaped!")
20
21 st.markdown("This <b>HTML tag</b> is not escaped!", unsafe_allow_html=True)
22
23 st.markdown("[text]")
24
25 st.markdown("[link](href)")
26
27 st.markdown("[][]")
28
29 st.markdown("Inline math with $\KaTeX$")
30
31 st.markdown(
32 """
33 $$
34 ax^2 + bx + c = 0
35 $$
36 """
37 )
38
[end of e2e/scripts/st_markdown.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/e2e/scripts/st_markdown.py b/e2e/scripts/st_markdown.py
--- a/e2e/scripts/st_markdown.py
+++ b/e2e/scripts/st_markdown.py
@@ -35,3 +35,11 @@
$$
"""
)
+
+st.markdown(
+ """
+| Col1 | Col2 |
+| --------- | ----------- |
+| Some | Data |
+"""
+)
| {"golden_diff": "diff --git a/e2e/scripts/st_markdown.py b/e2e/scripts/st_markdown.py\n--- a/e2e/scripts/st_markdown.py\n+++ b/e2e/scripts/st_markdown.py\n@@ -35,3 +35,11 @@\n $$\n \"\"\"\n )\n+\n+st.markdown(\n+ \"\"\"\n+| Col1 | Col2 |\n+| --------- | ----------- |\n+| Some | Data |\n+\"\"\"\n+)\n", "issue": "Dark theme does not properly adjust markdown tables\n### Summary\r\n\r\nWhen I load the latest streamlit in darkmode I cannot see anything in my markdown tables because the text color is changed but not the background color.\r\n\r\n### Steps to reproduce\r\n\r\nCode snippet:\r\n\r\n```\r\nmd = \"\"\"\r\n| Label | Info |\r\n| -------- | --------- |\r\n| Row | Data |\r\n\"\"\"\r\nst.markdown(md)\r\n```\r\n\r\n**Expected behavior:**\r\n\r\nI would expect if the text color get changed to white in the table, the background color should get changed to something dark\r\n\r\n**Actual behavior:**\r\n\r\nBoth the text color and background are white so nothing can be seen.\r\n\r\n### Is this a regression?\r\n\r\nno, consequence of new theme\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 0.79.0\r\n- Python version: 3.7.9\r\n- pip\r\n- OS version: MacOS Catalina 10.15.7\r\n- Browser version: Chrome 89.0.4389.90\r\n\r\n### Additional information\r\n\r\nI'm not sure why markdown tables have different background style but they seem to; perhaps other ui elements would be affected as well.\r\n\n", "before_files": [{"content": "# Copyright 2018-2021 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\nst.markdown(\"This **markdown** is awesome! :sunglasses:\")\n\nst.markdown(\"This <b>HTML tag</b> is escaped!\")\n\nst.markdown(\"This <b>HTML tag</b> is not escaped!\", unsafe_allow_html=True)\n\nst.markdown(\"[text]\")\n\nst.markdown(\"[link](href)\")\n\nst.markdown(\"[][]\")\n\nst.markdown(\"Inline math with $\\KaTeX$\")\n\nst.markdown(\n \"\"\"\n$$\nax^2 + bx + c = 0\n$$\n\"\"\"\n)\n", "path": "e2e/scripts/st_markdown.py"}]} | 1,111 | 99 |
gh_patches_debug_6084 | rasdani/github-patches | git_diff | bridgecrewio__checkov-107 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Checkov fails to start in Windows environments
**Describe the bug**
After you install Checkov on Windows, running Checkov does nothing.
**To Reproduce**
Steps to reproduce the behavior:
1. Open Powershell/cmd
2. Run the CLI command 'checkov'
3. Does nothing
**Expected behavior**
The tool running. Magic.
**Screenshots**
I'm not sure showing nothing would help.
**Desktop (please complete the following information):**
- OS: Windows 10
- Checkov Version 1.0.173
**Additional context**
I know, Windows! Like who cares, and tbh I've got WSL2 and it works a dream, but customers, customers and their awful locked down... anyway.
I'm using Python37, where I've installed it.
If you look in your c:/Python37/scripts folder there is a "checkov" bash script. This is the nub of it: this doesn't run! However, if you add a batch file "checkov-scan.bat" [or call it whatever] with this content:
```cmd
C:\Python37\python C:\Python37\Lib\site-packages\checkov\main.py %1 %2
```
Then when you run "checkov-scan" at your shell, it works! So is there any way you could package up something similar in a release? Please?
Also, I made a Python-based pre-commit hook for checkov called checkov-scan - here: <https://github.com/JamesWoolfenden/pre-commit>
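For packaging, a console-script entry point would sidestep the bash wrapper entirely, since pip generates a native `checkov.exe` on Windows; a sketch (the `run` callable is hypothetical — whatever `checkov/main.py` actually exposes would go there):

```python
# Sketch only; version and callable name are placeholders.
from setuptools import find_packages, setup

setup(
    name="checkov",
    version="0.0.0",
    packages=find_packages(exclude=["tests*"]),
    entry_points={
        "console_scripts": [
            "checkov = checkov.main:run",  # pip emits checkov.exe on Windows
        ],
    },
)
```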
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "alabaster==0.7.12",
28 "attrs==19.3.0",
29 "babel==2.7.0",
30 "certifi==2019.11.28",
31 "chardet==3.0.4",
32 "coverage==4.5.4",
33 "coverage-badge==1.0.1",
34 "detect-secrets==0.13.0",
35 "docopt==0.6.2",
36 "docutils==0.15.2",
37 "idna==2.8",
38 "imagesize==1.1.0",
39 "importlib-metadata==1.1.0; python_version < '3.8'",
40 "jinja2==2.10.3",
41 "lark-parser==0.7.8",
42 "markupsafe==1.1.1",
43 "more-itertools==8.0.0",
44 "packaging==19.2",
45 "pluggy==0.13.1",
46 "py==1.8.0",
47 "pygments==2.5.2",
48 "pyparsing==2.4.5",
49 "pytest==5.3.1",
50 "python-hcl2==0.2.0",
51 "pytz==2019.3",
52 "pyyaml==5.1.2",
53 "requests==2.22.0",
54 "six==1.13.0",
55 "snowballstemmer==2.0.0",
56 "sphinx==2.2.1",
57 "sphinxcontrib-applehelp==1.0.1",
58 "sphinxcontrib-devhelp==1.0.1",
59 "sphinxcontrib-htmlhelp==1.0.2",
60 "sphinxcontrib-jsmath==1.0.1",
61 "sphinxcontrib-qthelp==1.0.2",
62 "sphinxcontrib-serializinghtml==1.1.3",
63 "urllib3==1.25.7",
64 "wcwidth==0.1.7",
65 "zipp==0.6.0",
66 ]
67 },
68 install_requires=[
69 "chardet==3.0.4",
70 "colorama==0.4.3",
71 "docopt==0.6.2",
72 "idna==2.8",
73 "junit-xml==1.8",
74 "lark-parser==0.7.8",
75 "python-hcl2==0.2.0",
76 "pyyaml==5.2",
77 "requests==2.22.0",
78 "six==1.13.0",
79 "tabulate==0.8.6",
80 "termcolor==1.1.0",
81 "urllib3==1.25.7",
82 "dpath==1.5.0"
83 ],
84 license="Apache License 2.0",
85 name="checkov",
86 version=version,
87 description="Infrastructure as code static analysis",
88 author="bridgecrew",
89 author_email="[email protected]",
90 url="https://github.com/bridgecrewio/checkov",
91 packages=setuptools.find_packages(exclude=["tests*"]),
92 scripts=["bin/checkov"],
93 long_description=long_description,
94 long_description_content_type="text/markdown",
95 classifiers=[
96 'Environment :: Console',
97 'Intended Audience :: Developers',
98 'Intended Audience :: System Administrators',
99 'Programming Language :: Python :: 3.6',
100 'Programming Language :: Python :: 3.7',
101 'Topic :: Security',
102 'Topic :: Software Development :: Build Tools'
103 ]
104 )
105
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -89,7 +89,7 @@
author_email="[email protected]",
url="https://github.com/bridgecrewio/checkov",
packages=setuptools.find_packages(exclude=["tests*"]),
- scripts=["bin/checkov"],
+ scripts=["bin/checkov","bin/checkov.bat"],
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -89,7 +89,7 @@\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n- scripts=[\"bin/checkov\"],\n+ scripts=[\"bin/checkov\",\"bin/checkov.bat\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n", "issue": "Checkov fails to start in Windows environments \n**Describe the bug**\r\nAfter you install Checkov on Windows, running Checkov does nothing.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Open Powershell/cmd\r\n2. Run cli command 'checkov'\r\n3. Does nothing\r\n\r\n**Expected behavior**\r\nThe tool running. Magic.\r\n\r\n**Screenshots**\r\nI'm not sure showing nothing would help.\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 10\r\n - Checkov Version 1.0.173\r\n\r\n**Additional context**\r\nI know Windows! Like who cares and tbh ive got WSL2 and it works a dream but customers, customers and their awful locked down... anyway.\r\nI'm using Python37 where i've installed .\r\nIf you look in your c:/Python37/scripts folder there is a \"checkov\" bash script. This is the nub of it this doesn't run! However if you add a batch file \"checkov-scan.bat\" [or call whatever} with this content:\r\n```cmd\r\nC:\\Python37\\python C:\\Python37\\Lib\\site-packages\\checkov\\main.py %1 %2\r\n```\r\nThen when you run \"checkov-scan\" at your shell, it works! So is there anyway you could package up something similar in a release? please? \r\nAlso I made a python based pre-commit for checkov called checkov-scan - here <https://github.com/JamesWoolfenden/pre-commit>\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"alabaster==0.7.12\",\n \"attrs==19.3.0\",\n \"babel==2.7.0\",\n \"certifi==2019.11.28\",\n \"chardet==3.0.4\",\n \"coverage==4.5.4\",\n \"coverage-badge==1.0.1\",\n \"detect-secrets==0.13.0\",\n \"docopt==0.6.2\",\n \"docutils==0.15.2\",\n \"idna==2.8\",\n \"imagesize==1.1.0\",\n \"importlib-metadata==1.1.0; python_version < '3.8'\",\n \"jinja2==2.10.3\",\n \"lark-parser==0.7.8\",\n \"markupsafe==1.1.1\",\n \"more-itertools==8.0.0\",\n \"packaging==19.2\",\n \"pluggy==0.13.1\",\n \"py==1.8.0\",\n \"pygments==2.5.2\",\n \"pyparsing==2.4.5\",\n \"pytest==5.3.1\",\n \"python-hcl2==0.2.0\",\n \"pytz==2019.3\",\n \"pyyaml==5.1.2\",\n \"requests==2.22.0\",\n \"six==1.13.0\",\n \"snowballstemmer==2.0.0\",\n \"sphinx==2.2.1\",\n \"sphinxcontrib-applehelp==1.0.1\",\n \"sphinxcontrib-devhelp==1.0.1\",\n \"sphinxcontrib-htmlhelp==1.0.2\",\n \"sphinxcontrib-jsmath==1.0.1\",\n \"sphinxcontrib-qthelp==1.0.2\",\n \"sphinxcontrib-serializinghtml==1.1.3\",\n \"urllib3==1.25.7\",\n \"wcwidth==0.1.7\",\n \"zipp==0.6.0\",\n ]\n },\n 
install_requires=[\n \"chardet==3.0.4\",\n \"colorama==0.4.3\",\n \"docopt==0.6.2\",\n \"idna==2.8\",\n \"junit-xml==1.8\",\n \"lark-parser==0.7.8\",\n \"python-hcl2==0.2.0\",\n \"pyyaml==5.2\",\n \"requests==2.22.0\",\n \"six==1.13.0\",\n \"tabulate==0.8.6\",\n \"termcolor==1.1.0\",\n \"urllib3==1.25.7\",\n \"dpath==1.5.0\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\"]),\n scripts=[\"bin/checkov\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Security',\n 'Topic :: Software Development :: Build Tools'\n ]\n)\n", "path": "setup.py"}]} | 2,030 | 110 |
gh_patches_debug_21881 | rasdani/github-patches | git_diff | google__TensorNetwork-263 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ncon_interface tests fail
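The report came with no detail, but one plausible trigger, going by the `set_default_backend` body below, is the `config.default_dype` assignment (note the missing `t`), which silently drops any dtype a test sets. A minimal repro sketch of that pattern:

```python
# Sketch: the setter writes `default_dype`, so readers of `default_dtype`
# never observe the update.
class Config:
    default_backend = "numpy"
    default_dtype = None

config = Config()

def set_default_backend(backend, dtype=None):
    config.default_backend = backend
    config.default_dype = dtype  # typo: creates a brand-new attribute

set_default_backend("tensorflow", "float64")
assert config.default_dtype is None  # intended attribute was never set
```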
</issue>
<code>
[start of conftest.py]
1 # Copyright 2019 The TensorNetwork Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 from __future__ import division
17 from __future__ import print_function
18 import pytest
19
20
21 @pytest.fixture(name="backend", params=["numpy", "tensorflow",
22 "jax", "pytorch"])
23 def backend_fixure(request):
24 return request.param
25
[end of conftest.py]
[start of tensornetwork/__init__.py]
1 from __future__ import absolute_import
2 from tensornetwork.network import TensorNetwork
3 from tensornetwork.network_components import Node, Edge, CopyNode
4 from tensornetwork.ncon_interface import ncon, ncon_network
5 from tensornetwork.version import __version__
6 from tensornetwork.visualization.graphviz import to_graphviz
7 from tensornetwork import contractors
8 from tensornetwork import config
9 from typing import Text, Optional, Type
10 from tensornetwork.utils import load
11
12
13 def set_default_backend(backend: Text, dtype: Optional[Type] = None) -> None:
14 config.default_backend = backend
15 config.default_dype = dtype
16
[end of tensornetwork/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conftest.py b/conftest.py
--- a/conftest.py
+++ b/conftest.py
@@ -16,9 +16,33 @@
from __future__ import division
from __future__ import print_function
import pytest
+import jax
+import tensornetwork
+import tensorflow as tf
@pytest.fixture(name="backend", params=["numpy", "tensorflow",
"jax", "pytorch"])
def backend_fixure(request):
return request.param
+
+
[email protected](autouse=True)
+def reset_default_backend():
+ tensornetwork.set_default_backend("numpy")
+ yield
+ tensornetwork.set_default_backend("numpy")
+
+
[email protected](autouse=True)
+def enable_jax_64():
+ jax.config.update("jax_enable_x64", True)
+ yield
+ jax.config.update("jax_enable_x64", True)
+
+
[email protected](autouse=True)
+def tf_enable_v2_behaviour():
+ tf.compat.v1.enable_v2_behavior()
+ yield
+ tf.compat.v1.enable_v2_behavior()
diff --git a/tensornetwork/__init__.py b/tensornetwork/__init__.py
--- a/tensornetwork/__init__.py
+++ b/tensornetwork/__init__.py
@@ -12,4 +12,4 @@
def set_default_backend(backend: Text, dtype: Optional[Type] = None) -> None:
config.default_backend = backend
- config.default_dype = dtype
+ config.default_dtype = dtype
| {"golden_diff": "diff --git a/conftest.py b/conftest.py\n--- a/conftest.py\n+++ b/conftest.py\n@@ -16,9 +16,33 @@\n from __future__ import division\n from __future__ import print_function\n import pytest\n+import jax\n+import tensornetwork\n+import tensorflow as tf\n \n \n @pytest.fixture(name=\"backend\", params=[\"numpy\", \"tensorflow\",\n \"jax\", \"pytorch\"])\n def backend_fixure(request):\n return request.param\n+\n+\[email protected](autouse=True)\n+def reset_default_backend():\n+ tensornetwork.set_default_backend(\"numpy\")\n+ yield\n+ tensornetwork.set_default_backend(\"numpy\")\n+\n+\[email protected](autouse=True)\n+def enable_jax_64():\n+ jax.config.update(\"jax_enable_x64\", True)\n+ yield\n+ jax.config.update(\"jax_enable_x64\", True)\n+\n+\[email protected](autouse=True)\n+def tf_enable_v2_behaviour():\n+ tf.compat.v1.enable_v2_behavior()\n+ yield\n+ tf.compat.v1.enable_v2_behavior()\ndiff --git a/tensornetwork/__init__.py b/tensornetwork/__init__.py\n--- a/tensornetwork/__init__.py\n+++ b/tensornetwork/__init__.py\n@@ -12,4 +12,4 @@\n \n def set_default_backend(backend: Text, dtype: Optional[Type] = None) -> None:\n config.default_backend = backend\n- config.default_dype = dtype\n+ config.default_dtype = dtype\n", "issue": "ncon_interface tests fail \n\n", "before_files": [{"content": "# Copyright 2019 The TensorNetwork Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport pytest\n\n\[email protected](name=\"backend\", params=[\"numpy\", \"tensorflow\",\n \"jax\", \"pytorch\"])\ndef backend_fixure(request):\n return request.param\n", "path": "conftest.py"}, {"content": "from __future__ import absolute_import\nfrom tensornetwork.network import TensorNetwork\nfrom tensornetwork.network_components import Node, Edge, CopyNode\nfrom tensornetwork.ncon_interface import ncon, ncon_network\nfrom tensornetwork.version import __version__\nfrom tensornetwork.visualization.graphviz import to_graphviz\nfrom tensornetwork import contractors\nfrom tensornetwork import config\nfrom typing import Text, Optional, Type\nfrom tensornetwork.utils import load\n\n\ndef set_default_backend(backend: Text, dtype: Optional[Type] = None) -> None:\n config.default_backend = backend\n config.default_dype = dtype\n", "path": "tensornetwork/__init__.py"}]} | 955 | 356 |
gh_patches_debug_57271 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-984 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'async for' requires an object with __aiter__ method, got AIOTracedCursor
## Problem
Using ddtrace and aiopg, if I do:
```python
await cur.execute(query)
async for value in cur:
yield value
```
If my connection is patched, I get:
```
TypeError: 'async for' requires an object with __aiter__ method, got AIOTracedCursor
(...)
File "path/to/my/file.py", line 241, in get_many
async for value in cur:
```
(if my connection is not patched, it works)
## Analysis
The cursor class is replaced with `AIOTracedCursor` which inherits `wrapt.ObjectProxy`.
Problem is, while thanks to `ObjectProxy`, `AIOTracedCursor().__aiter__()` would most probably work and return whatever the real proxied cursor would return, this is not enough for Python to accept that the cursor is an async iterator: implicit special-method lookup happens on the type and bypasses `__getattr__`/`__getattribute__`, so the proxy's attribute forwarding is never consulted by `async for`.
A small example with simple objects:
```python
class A():
def iter(self):
return iter([])
async def aiter(self):
return iter([])
def __getattr__(self, attr):
if attr.endswith("iter__"):
return getattr(self, attr.strip("_"))
a = A()
```
We implement `a.__iter__()` and `a.__aiter__()`, but Python doesn't see them:
```
In [6]: a.__iter__()
Out[6]: <list_iterator at 0x7fdff00de860>
In [7]: a.__aiter__()
Out[7]: <coroutine object A.aiter at 0x7fdff00ddba0>
In [8]: async for e in a: print(e)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
cell_name in async-def-wrapper()
TypeError: 'async for' requires an object with __aiter__ method, got A
In [9]: iter(a)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-9-2b64cb055077> in <module>
----> 1 iter(a)
TypeError: 'A' object is not iterable
```
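A minimal sketch of the kind of fix this implies, assuming the `wrapt` proxy used here: define the special method explicitly on the class, so the type-level lookup that `async for` performs can find it.

```python
import wrapt

class AIOTracedCursor(wrapt.ObjectProxy):
    def __aiter__(self):
        # Special-method lookup happens on the type, so this must exist
        # on the class itself; simply delegate to the wrapped cursor.
        return self.__wrapped__.__aiter__()
```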
</issue>
<code>
[start of ddtrace/contrib/aiopg/connection.py]
1 import asyncio
2 from ddtrace.vendor import wrapt
3
4 from aiopg.utils import _ContextManager
5
6 from .. import dbapi
7 from ...constants import ANALYTICS_SAMPLE_RATE_KEY
8 from ...ext import sql, AppTypes
9 from ...pin import Pin
10 from ...settings import config
11
12
13 class AIOTracedCursor(wrapt.ObjectProxy):
14 """ TracedCursor wraps a psql cursor and traces its queries. """
15
16 def __init__(self, cursor, pin):
17 super(AIOTracedCursor, self).__init__(cursor)
18 pin.onto(self)
19 name = pin.app or 'sql'
20 self._datadog_name = '%s.query' % name
21
22 @asyncio.coroutine
23 def _trace_method(self, method, resource, extra_tags, *args, **kwargs):
24 pin = Pin.get_from(self)
25 if not pin or not pin.enabled():
26 result = yield from method(*args, **kwargs)
27 return result
28 service = pin.service
29
30 with pin.tracer.trace(self._datadog_name, service=service,
31 resource=resource) as s:
32 s.span_type = sql.TYPE
33 s.set_tag(sql.QUERY, resource)
34 s.set_tags(pin.tags)
35 s.set_tags(extra_tags)
36
37 # set analytics sample rate
38 s.set_tag(
39 ANALYTICS_SAMPLE_RATE_KEY,
40 config.aiopg.get_analytics_sample_rate()
41 )
42
43 try:
44 result = yield from method(*args, **kwargs)
45 return result
46 finally:
47 s.set_metric('db.rowcount', self.rowcount)
48
49 @asyncio.coroutine
50 def executemany(self, query, *args, **kwargs):
51 # FIXME[matt] properly handle kwargs here. arg names can be different
52 # with different libs.
53 result = yield from self._trace_method(
54 self.__wrapped__.executemany, query, {'sql.executemany': 'true'},
55 query, *args, **kwargs)
56 return result
57
58 @asyncio.coroutine
59 def execute(self, query, *args, **kwargs):
60 result = yield from self._trace_method(
61 self.__wrapped__.execute, query, {}, query, *args, **kwargs)
62 return result
63
64 @asyncio.coroutine
65 def callproc(self, proc, args):
66 result = yield from self._trace_method(
67 self.__wrapped__.callproc, proc, {}, proc, args)
68 return result
69
70
71 class AIOTracedConnection(wrapt.ObjectProxy):
72 """ TracedConnection wraps a Connection with tracing code. """
73
74 def __init__(self, conn, pin=None, cursor_cls=AIOTracedCursor):
75 super(AIOTracedConnection, self).__init__(conn)
76 name = dbapi._get_vendor(conn)
77 db_pin = pin or Pin(service=name, app=name, app_type=AppTypes.db)
78 db_pin.onto(self)
79 # wrapt requires prefix of `_self` for attributes that are only in the
80 # proxy (since some of our source objects will use `__slots__`)
81 self._self_cursor_cls = cursor_cls
82
83 def cursor(self, *args, **kwargs):
84 # unfortunately we also need to patch this method as otherwise "self"
85 # ends up being the aiopg connection object
86 coro = self._cursor(*args, **kwargs)
87 return _ContextManager(coro)
88
89 @asyncio.coroutine
90 def _cursor(self, *args, **kwargs):
91 cursor = yield from self.__wrapped__._cursor(*args, **kwargs)
92 pin = Pin.get_from(self)
93 if not pin:
94 return cursor
95 return self._self_cursor_cls(cursor, pin)
96
[end of ddtrace/contrib/aiopg/connection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ddtrace/contrib/aiopg/connection.py b/ddtrace/contrib/aiopg/connection.py
--- a/ddtrace/contrib/aiopg/connection.py
+++ b/ddtrace/contrib/aiopg/connection.py
@@ -67,6 +67,9 @@
self.__wrapped__.callproc, proc, {}, proc, args)
return result
+ def __aiter__(self):
+ return self.__wrapped__.__aiter__()
+
class AIOTracedConnection(wrapt.ObjectProxy):
""" TracedConnection wraps a Connection with tracing code. """
| {"golden_diff": "diff --git a/ddtrace/contrib/aiopg/connection.py b/ddtrace/contrib/aiopg/connection.py\n--- a/ddtrace/contrib/aiopg/connection.py\n+++ b/ddtrace/contrib/aiopg/connection.py\n@@ -67,6 +67,9 @@\n self.__wrapped__.callproc, proc, {}, proc, args)\n return result\n \n+ def __aiter__(self):\n+ return self.__wrapped__.__aiter__()\n+\n \n class AIOTracedConnection(wrapt.ObjectProxy):\n \"\"\" TracedConnection wraps a Connection with tracing code. \"\"\"\n", "issue": "'async for' requires an object with __aiter__ method, got AIOTracedCursor\n## Problem\r\nUsing ddtrace and aiopg, if I do:\r\n\r\n```python\r\nawait cur.execute(query)\r\nasync for value in cur:\r\n yield value\r\n```\r\nIf my connection is not patched, I get:\r\n```\r\nTypeError: 'async for' requires an object with __aiter__ method, got AIOTracedCursor\r\n(...)\r\n File \"path/to/my/file.py\", line 241, in get_many\r\n async for value in cur:\r\n```\r\n(if my connection is not patched, it works)\r\n\r\n## Analysis\r\n\r\nThe cursor class is replaced with `AIOTracedCursor` which inherits `wrapt.ObjectProxy`.\r\n\r\nProblem is, while thanks to `ObjectProxy`, `AIOTracedCursor().__aiter__()` would most probably work and return whatever the real proxy would return, this is not enough for Python to accept that the cursor is an iterator.\r\n\r\nA small example with simple objects:\r\n```python\r\nclass A():\r\n def iter(self):\r\n return iter([])\r\n\r\n async def aiter(self):\r\n return iter([])\r\n\r\n def __getattr__(self, attr):\r\n if attr.endswith(\"iter__\"):\r\n return getattr(self, attr.strip(\"_\"))\r\na = A()\r\n```\r\nWe implement `a.__iter__()` and `a.__aiter__()` but Python doesn't see it:\r\n```\r\nIn [6]: a.__iter__() \r\nOut[6]: <list_iterator at 0x7fdff00de860>\r\n\r\nIn [7]: a.__aiter__() \r\nOut[7]: <coroutine object A.aiter at 0x7fdff00ddba0>\r\n\r\nIn [8]: async for e in a: print(e) \r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\ncell_name in async-def-wrapper()\r\n\r\nTypeError: 'async for' requires an object with __aiter__ method, got A \r\n\r\nIn [9]: iter(a) \r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-9-2b64cb055077> in <module>\r\n----> 1 iter(a)\r\n\r\nTypeError: 'A' object is not iterable\r\n\r\n```\n", "before_files": [{"content": "import asyncio\nfrom ddtrace.vendor import wrapt\n\nfrom aiopg.utils import _ContextManager\n\nfrom .. import dbapi\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...ext import sql, AppTypes\nfrom ...pin import Pin\nfrom ...settings import config\n\n\nclass AIOTracedCursor(wrapt.ObjectProxy):\n \"\"\" TracedCursor wraps a psql cursor and traces its queries. 
\"\"\"\n\n def __init__(self, cursor, pin):\n super(AIOTracedCursor, self).__init__(cursor)\n pin.onto(self)\n name = pin.app or 'sql'\n self._datadog_name = '%s.query' % name\n\n @asyncio.coroutine\n def _trace_method(self, method, resource, extra_tags, *args, **kwargs):\n pin = Pin.get_from(self)\n if not pin or not pin.enabled():\n result = yield from method(*args, **kwargs)\n return result\n service = pin.service\n\n with pin.tracer.trace(self._datadog_name, service=service,\n resource=resource) as s:\n s.span_type = sql.TYPE\n s.set_tag(sql.QUERY, resource)\n s.set_tags(pin.tags)\n s.set_tags(extra_tags)\n\n # set analytics sample rate\n s.set_tag(\n ANALYTICS_SAMPLE_RATE_KEY,\n config.aiopg.get_analytics_sample_rate()\n )\n\n try:\n result = yield from method(*args, **kwargs)\n return result\n finally:\n s.set_metric('db.rowcount', self.rowcount)\n\n @asyncio.coroutine\n def executemany(self, query, *args, **kwargs):\n # FIXME[matt] properly handle kwargs here. arg names can be different\n # with different libs.\n result = yield from self._trace_method(\n self.__wrapped__.executemany, query, {'sql.executemany': 'true'},\n query, *args, **kwargs)\n return result\n\n @asyncio.coroutine\n def execute(self, query, *args, **kwargs):\n result = yield from self._trace_method(\n self.__wrapped__.execute, query, {}, query, *args, **kwargs)\n return result\n\n @asyncio.coroutine\n def callproc(self, proc, args):\n result = yield from self._trace_method(\n self.__wrapped__.callproc, proc, {}, proc, args)\n return result\n\n\nclass AIOTracedConnection(wrapt.ObjectProxy):\n \"\"\" TracedConnection wraps a Connection with tracing code. \"\"\"\n\n def __init__(self, conn, pin=None, cursor_cls=AIOTracedCursor):\n super(AIOTracedConnection, self).__init__(conn)\n name = dbapi._get_vendor(conn)\n db_pin = pin or Pin(service=name, app=name, app_type=AppTypes.db)\n db_pin.onto(self)\n # wrapt requires prefix of `_self` for attributes that are only in the\n # proxy (since some of our source objects will use `__slots__`)\n self._self_cursor_cls = cursor_cls\n\n def cursor(self, *args, **kwargs):\n # unfortunately we also need to patch this method as otherwise \"self\"\n # ends up being the aiopg connection object\n coro = self._cursor(*args, **kwargs)\n return _ContextManager(coro)\n\n @asyncio.coroutine\n def _cursor(self, *args, **kwargs):\n cursor = yield from self.__wrapped__._cursor(*args, **kwargs)\n pin = Pin.get_from(self)\n if not pin:\n return cursor\n return self._self_cursor_cls(cursor, pin)\n", "path": "ddtrace/contrib/aiopg/connection.py"}]} | 2,017 | 130 |
gh_patches_debug_29199 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1361 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IN-AP has changed its data url
The new link is https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx (same page layout, I think). The old link now returns a 404.
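A quick check of the two endpoints, sketched with `requests` (assuming no special headers are needed):

```python
import requests

old = 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx'
new = 'https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx'
for url in (old, new):
    # Expect a 404 for the old URL and a 200 for the new one.
    print(url, requests.get(url).status_code)
```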
</issue>
<code>
[start of parsers/IN_AP.py]
1 #!/usr/bin/env python3
2
3 from requests import Session
4 from .lib import zonekey, IN, web
5
6
7 def fetch_production(zone_key='IN-AP', session=None, target_datetime=None, logger=None):
8 """Fetch Andhra Pradesh production"""
9 if target_datetime:
10 raise NotImplementedError('This parser is not yet able to parse past dates')
11
12 zonekey.assert_zone_key(zone_key, 'IN-AP')
13
14 html = web.get_response_soup(zone_key,
15 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)
16 india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')
17
18 hydro_value = IN.read_value_from_span_id(html, 'lblHydel')
19 gas_value = IN.read_value_from_span_id(html, 'lblGas')
20 wind_value = IN.read_value_from_span_id(html, 'lblWind')
21 solar_value = IN.read_value_from_span_id(html, 'lblSolar')
22
23 # All thermal centrals are considered coal based production
24 # https://en.wikipedia.org/wiki/Power_sector_of_Andhra_Pradesh
25 thermal_value = IN.read_value_from_span_id(html, 'lblThermal')
26
27 cgs_value = IN.read_value_from_span_id(html, 'lblCGS')
28 ipp_value = IN.read_value_from_span_id(html, 'lblIPPS')
29
30 data = {
31 'zoneKey': zone_key,
32 'datetime': india_date.datetime,
33 'production': {
34 'biomass': 0.0,
35 'coal': thermal_value,
36 'gas': gas_value,
37 'hydro': hydro_value,
38 'nuclear': 0.0,
39 'oil': 0.0,
40 'solar': solar_value,
41 'wind': wind_value,
42 'geothermal': 0.0,
43 'unknown': round(cgs_value + ipp_value, 2)
44 },
45 'storage': {
46 'hydro': 0.0
47 },
48 'source': 'core.ap.gov.in',
49 }
50
51 return data
52
53
54 def fetch_consumption(zone_key='IN-AP', session=None, target_datetime=None, logger=None):
55 """Fetch Andhra Pradesh consumption"""
56 if target_datetime:
57 raise NotImplementedError('This parser is not yet able to parse past dates')
58
59 zonekey.assert_zone_key(zone_key, 'IN-AP')
60
61 html = web.get_response_soup(zone_key,
62 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)
63 india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')
64
65 demand_value = IN.read_value_from_span_id(html, 'lblGridDemand')
66
67 data = {
68 'zoneKey': zone_key,
69 'datetime': india_date.datetime,
70 'consumption': demand_value,
71 'source': 'core.ap.gov.in'
72 }
73
74 return data
75
76
77 if __name__ == '__main__':
78 session = Session()
79 print(fetch_production('IN-AP', session))
80 print(fetch_consumption('IN-AP', session))
81
[end of parsers/IN_AP.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/IN_AP.py b/parsers/IN_AP.py
--- a/parsers/IN_AP.py
+++ b/parsers/IN_AP.py
@@ -3,7 +3,6 @@
from requests import Session
from .lib import zonekey, IN, web
-
def fetch_production(zone_key='IN-AP', session=None, target_datetime=None, logger=None):
"""Fetch Andhra Pradesh production"""
if target_datetime:
@@ -12,7 +11,7 @@
zonekey.assert_zone_key(zone_key, 'IN-AP')
html = web.get_response_soup(zone_key,
- 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)
+ 'https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)
india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')
hydro_value = IN.read_value_from_span_id(html, 'lblHydel')
@@ -59,7 +58,7 @@
zonekey.assert_zone_key(zone_key, 'IN-AP')
html = web.get_response_soup(zone_key,
- 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)
+ 'https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)
india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')
demand_value = IN.read_value_from_span_id(html, 'lblGridDemand')
| {"golden_diff": "diff --git a/parsers/IN_AP.py b/parsers/IN_AP.py\n--- a/parsers/IN_AP.py\n+++ b/parsers/IN_AP.py\n@@ -3,7 +3,6 @@\n from requests import Session\n from .lib import zonekey, IN, web\n \n-\n def fetch_production(zone_key='IN-AP', session=None, target_datetime=None, logger=None):\n \"\"\"Fetch Andhra Pradesh production\"\"\"\n if target_datetime:\n@@ -12,7 +11,7 @@\n zonekey.assert_zone_key(zone_key, 'IN-AP')\n \n html = web.get_response_soup(zone_key,\n- 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)\n+ 'https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)\n india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')\n \n hydro_value = IN.read_value_from_span_id(html, 'lblHydel')\n@@ -59,7 +58,7 @@\n zonekey.assert_zone_key(zone_key, 'IN-AP')\n \n html = web.get_response_soup(zone_key,\n- 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)\n+ 'https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)\n india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')\n \n demand_value = IN.read_value_from_span_id(html, 'lblGridDemand')\n", "issue": "IN-AP has changed its data url\nThe new link is https://core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx (same page layout I think). Old link returns 404.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom requests import Session\nfrom .lib import zonekey, IN, web\n\n\ndef fetch_production(zone_key='IN-AP', session=None, target_datetime=None, logger=None):\n \"\"\"Fetch Andhra Pradesh production\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n zonekey.assert_zone_key(zone_key, 'IN-AP')\n\n html = web.get_response_soup(zone_key,\n 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)\n india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY HH:mm')\n\n hydro_value = IN.read_value_from_span_id(html, 'lblHydel')\n gas_value = IN.read_value_from_span_id(html, 'lblGas')\n wind_value = IN.read_value_from_span_id(html, 'lblWind')\n solar_value = IN.read_value_from_span_id(html, 'lblSolar')\n\n # All thermal centrals are considered coal based production\n # https://en.wikipedia.org/wiki/Power_sector_of_Andhra_Pradesh\n thermal_value = IN.read_value_from_span_id(html, 'lblThermal')\n\n cgs_value = IN.read_value_from_span_id(html, 'lblCGS')\n ipp_value = IN.read_value_from_span_id(html, 'lblIPPS')\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': india_date.datetime,\n 'production': {\n 'biomass': 0.0,\n 'coal': thermal_value,\n 'gas': gas_value,\n 'hydro': hydro_value,\n 'nuclear': 0.0,\n 'oil': 0.0,\n 'solar': solar_value,\n 'wind': wind_value,\n 'geothermal': 0.0,\n 'unknown': round(cgs_value + ipp_value, 2)\n },\n 'storage': {\n 'hydro': 0.0\n },\n 'source': 'core.ap.gov.in',\n }\n\n return data\n\n\ndef fetch_consumption(zone_key='IN-AP', session=None, target_datetime=None, logger=None):\n \"\"\"Fetch Andhra Pradesh consumption\"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n zonekey.assert_zone_key(zone_key, 'IN-AP')\n\n html = web.get_response_soup(zone_key,\n 'http://www.core.ap.gov.in/CMDashBoard/UserInterface/Power/PowerReport.aspx', session)\n india_date = IN.read_datetime_from_span_id(html, 'lblPowerStatusDate', 'DD-MM-YYYY 
HH:mm')\n\n demand_value = IN.read_value_from_span_id(html, 'lblGridDemand')\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': india_date.datetime,\n 'consumption': demand_value,\n 'source': 'core.ap.gov.in'\n }\n\n return data\n\n\nif __name__ == '__main__':\n session = Session()\n print(fetch_production('IN-AP', session))\n print(fetch_consumption('IN-AP', session))\n", "path": "parsers/IN_AP.py"}]} | 1,419 | 358 |
gh_patches_debug_16923 | rasdani/github-patches | git_diff | Mailu__Mailu-1130 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unnecessary assignment on `HOST_WEBMAIL`
We came across another piece of garbage:
https://github.com/Mailu/Mailu/blob/f3f0c3190be9ab9b53a29c5b0326fc9a4602df46/core/nginx/config.py#L19
https://github.com/Mailu/Mailu/blob/f3f0c3190be9ab9b53a29c5b0326fc9a4602df46/core/nginx/config.py#L22
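For context, a hedged before/after sketch of the simplification, using the `args` and `system` names from the file below:

```python
# Before: the assignment buys nothing, since the value is only read right below.
args["HOST_WEBMAIL"] = args.get("HOST_WEBMAIL", "webmail")
if args["WEBMAIL"] != "none":
    args["WEBMAIL_ADDRESS"] = system.resolve_address(args.get("HOST_WEBMAIL"))

# After: resolve with the default inline, and only when it is actually needed.
if args["WEBMAIL"] != "none":
    args["WEBMAIL_ADDRESS"] = system.resolve_address(args.get("HOST_WEBMAIL", "webmail"))
```

The same applies to the `HOST_WEBDAV` pair of lines.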
</issue>
<code>
[start of core/nginx/config.py]
1 #!/usr/bin/python3
2
3 import os
4 import logging as log
5 import sys
6 from socrate import system, conf
7
8 args = os.environ.copy()
9
10 log.basicConfig(stream=sys.stderr, level=args.get("LOG_LEVEL", "WARNING"))
11
12 # Get the first DNS server
13 with open("/etc/resolv.conf") as handle:
14 content = handle.read().split()
15 args["RESOLVER"] = content[content.index("nameserver") + 1]
16
17 args["ADMIN_ADDRESS"] = system.resolve_address(args.get("HOST_ADMIN", "admin"))
18 args["ANTISPAM_ADDRESS"] = system.resolve_address(args.get("HOST_ANTISPAM", "antispam:11334"))
19 args["HOST_WEBMAIL"] = args.get("HOST_WEBMAIL", "webmail")
20 if args["WEBMAIL"] != "none":
21 args["WEBMAIL_ADDRESS"] = system.resolve_address(args.get("HOST_WEBMAIL"))
22 args["HOST_WEBDAV"] = args.get("HOST_WEBDAV", "webdav:5232")
23 if args["WEBDAV"] != "none":
24 args["WEBDAV_ADDRESS"] = system.resolve_address(args.get("HOST_WEBDAV"))
25
26 # TLS configuration
27 cert_name = os.getenv("TLS_CERT_FILENAME", default="cert.pem")
28 keypair_name = os.getenv("TLS_KEYPAIR_FILENAME", default="key.pem")
29 args["TLS"] = {
30 "cert": ("/certs/%s" % cert_name, "/certs/%s" % keypair_name),
31 "letsencrypt": ("/certs/letsencrypt/live/mailu/fullchain.pem",
32 "/certs/letsencrypt/live/mailu/privkey.pem"),
33 "mail": ("/certs/%s" % cert_name, "/certs/%s" % keypair_name),
34 "mail-letsencrypt": ("/certs/letsencrypt/live/mailu/fullchain.pem",
35 "/certs/letsencrypt/live/mailu/privkey.pem"),
36 "notls": None
37 }[args["TLS_FLAVOR"]]
38
39 if args["TLS"] and not all(os.path.exists(file_path) for file_path in args["TLS"]):
40 print("Missing cert or key file, disabling TLS")
41 args["TLS_ERROR"] = "yes"
42
43 # Build final configuration paths
44 conf.jinja("/conf/tls.conf", args, "/etc/nginx/tls.conf")
45 conf.jinja("/conf/proxy.conf", args, "/etc/nginx/proxy.conf")
46 conf.jinja("/conf/nginx.conf", args, "/etc/nginx/nginx.conf")
47 if os.path.exists("/var/run/nginx.pid"):
48 os.system("nginx -s reload")
49
[end of core/nginx/config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/nginx/config.py b/core/nginx/config.py
--- a/core/nginx/config.py
+++ b/core/nginx/config.py
@@ -16,12 +16,10 @@
args["ADMIN_ADDRESS"] = system.resolve_address(args.get("HOST_ADMIN", "admin"))
args["ANTISPAM_ADDRESS"] = system.resolve_address(args.get("HOST_ANTISPAM", "antispam:11334"))
-args["HOST_WEBMAIL"] = args.get("HOST_WEBMAIL", "webmail")
if args["WEBMAIL"] != "none":
- args["WEBMAIL_ADDRESS"] = system.resolve_address(args.get("HOST_WEBMAIL"))
-args["HOST_WEBDAV"] = args.get("HOST_WEBDAV", "webdav:5232")
+ args["WEBMAIL_ADDRESS"] = system.resolve_address(args.get("HOST_WEBMAIL", "webmail"))
if args["WEBDAV"] != "none":
- args["WEBDAV_ADDRESS"] = system.resolve_address(args.get("HOST_WEBDAV"))
+ args["WEBDAV_ADDRESS"] = system.resolve_address(args.get("HOST_WEBDAV", "webdav:5232"))
# TLS configuration
cert_name = os.getenv("TLS_CERT_FILENAME", default="cert.pem")
| {"golden_diff": "diff --git a/core/nginx/config.py b/core/nginx/config.py\n--- a/core/nginx/config.py\n+++ b/core/nginx/config.py\n@@ -16,12 +16,10 @@\n \n args[\"ADMIN_ADDRESS\"] = system.resolve_address(args.get(\"HOST_ADMIN\", \"admin\"))\n args[\"ANTISPAM_ADDRESS\"] = system.resolve_address(args.get(\"HOST_ANTISPAM\", \"antispam:11334\"))\n-args[\"HOST_WEBMAIL\"] = args.get(\"HOST_WEBMAIL\", \"webmail\")\n if args[\"WEBMAIL\"] != \"none\":\n- args[\"WEBMAIL_ADDRESS\"] = system.resolve_address(args.get(\"HOST_WEBMAIL\"))\n-args[\"HOST_WEBDAV\"] = args.get(\"HOST_WEBDAV\", \"webdav:5232\")\n+ args[\"WEBMAIL_ADDRESS\"] = system.resolve_address(args.get(\"HOST_WEBMAIL\", \"webmail\"))\n if args[\"WEBDAV\"] != \"none\":\n- args[\"WEBDAV_ADDRESS\"] = system.resolve_address(args.get(\"HOST_WEBDAV\"))\n+ args[\"WEBDAV_ADDRESS\"] = system.resolve_address(args.get(\"HOST_WEBDAV\", \"webdav:5232\"))\n \n # TLS configuration\n cert_name = os.getenv(\"TLS_CERT_FILENAME\", default=\"cert.pem\")\n", "issue": "Unnecessary assignment on `HOST_WEBMAIL`\nWe came across another piece of garbage:\r\n\r\nhttps://github.com/Mailu/Mailu/blob/f3f0c3190be9ab9b53a29c5b0326fc9a4602df46/core/nginx/config.py#L19\r\n\r\nhttps://github.com/Mailu/Mailu/blob/f3f0c3190be9ab9b53a29c5b0326fc9a4602df46/core/nginx/config.py#L22\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport logging as log\nimport sys\nfrom socrate import system, conf\n\nargs = os.environ.copy()\n\nlog.basicConfig(stream=sys.stderr, level=args.get(\"LOG_LEVEL\", \"WARNING\"))\n\n# Get the first DNS server\nwith open(\"/etc/resolv.conf\") as handle:\n content = handle.read().split()\n args[\"RESOLVER\"] = content[content.index(\"nameserver\") + 1]\n\nargs[\"ADMIN_ADDRESS\"] = system.resolve_address(args.get(\"HOST_ADMIN\", \"admin\"))\nargs[\"ANTISPAM_ADDRESS\"] = system.resolve_address(args.get(\"HOST_ANTISPAM\", \"antispam:11334\"))\nargs[\"HOST_WEBMAIL\"] = args.get(\"HOST_WEBMAIL\", \"webmail\")\nif args[\"WEBMAIL\"] != \"none\":\n args[\"WEBMAIL_ADDRESS\"] = system.resolve_address(args.get(\"HOST_WEBMAIL\"))\nargs[\"HOST_WEBDAV\"] = args.get(\"HOST_WEBDAV\", \"webdav:5232\")\nif args[\"WEBDAV\"] != \"none\":\n args[\"WEBDAV_ADDRESS\"] = system.resolve_address(args.get(\"HOST_WEBDAV\"))\n\n# TLS configuration\ncert_name = os.getenv(\"TLS_CERT_FILENAME\", default=\"cert.pem\")\nkeypair_name = os.getenv(\"TLS_KEYPAIR_FILENAME\", default=\"key.pem\")\nargs[\"TLS\"] = {\n \"cert\": (\"/certs/%s\" % cert_name, \"/certs/%s\" % keypair_name),\n \"letsencrypt\": (\"/certs/letsencrypt/live/mailu/fullchain.pem\",\n \"/certs/letsencrypt/live/mailu/privkey.pem\"),\n \"mail\": (\"/certs/%s\" % cert_name, \"/certs/%s\" % keypair_name),\n \"mail-letsencrypt\": (\"/certs/letsencrypt/live/mailu/fullchain.pem\",\n \"/certs/letsencrypt/live/mailu/privkey.pem\"),\n \"notls\": None\n}[args[\"TLS_FLAVOR\"]]\n\nif args[\"TLS\"] and not all(os.path.exists(file_path) for file_path in args[\"TLS\"]):\n print(\"Missing cert or key file, disabling TLS\")\n args[\"TLS_ERROR\"] = \"yes\"\n\n# Build final configuration paths\nconf.jinja(\"/conf/tls.conf\", args, \"/etc/nginx/tls.conf\")\nconf.jinja(\"/conf/proxy.conf\", args, \"/etc/nginx/proxy.conf\")\nconf.jinja(\"/conf/nginx.conf\", args, \"/etc/nginx/nginx.conf\")\nif os.path.exists(\"/var/run/nginx.pid\"):\n os.system(\"nginx -s reload\")\n", "path": "core/nginx/config.py"}]} | 1,303 | 273 |
gh_patches_debug_15711 | rasdani/github-patches | git_diff | translate__pootle-6087 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Delete a TP from an old style project and the Project page stays cached
1. Create a new TP
2. TP is available
3. Delete TP
4. The project page still lists the TP, though it should be gone
5. Visiting the supposedly deleted TP returns a 404
We're not expiring cache when a TP is deleted.
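A minimal sketch of the missing hook, mirroring the existing `Directory` receivers (the `TranslationProject` import path is assumed from the app layout):

```python
from django.db.models.signals import pre_delete
from django.dispatch import receiver

from pootle.core.delegate import revision_updater
from pootle_app.models import Directory
from pootle_translationproject.models import TranslationProject


@receiver(pre_delete, sender=TranslationProject)
def handle_tp_delete(**kwargs):
    # Bump the stats/checks revision on the TP's directory so cached
    # project pages expire when the TP is deleted.
    revision_updater.get(Directory)(
        context=kwargs["instance"].directory).update(keys=["stats", "checks"])
```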
</issue>
<code>
[start of pootle/apps/pootle_revision/receivers.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django.db.models.signals import post_save, pre_delete
10 from django.dispatch import receiver
11
12 from pootle.core.delegate import revision_updater
13 from pootle_app.models import Directory
14 from pootle_data.models import StoreData
15 from pootle_store.models import Store
16
17
18 @receiver(post_save, sender=StoreData)
19 def handle_storedata_save(**kwargs):
20 revision_updater.get(Store)(
21 context=kwargs["instance"].store).update(keys=["stats", "checks"])
22
23
24 @receiver(post_save, sender=Directory)
25 def handle_directory_save(**kwargs):
26 context = (
27 kwargs["instance"].parent
28 if kwargs.get("created")
29 else kwargs["instance"])
30 revision_updater.get(Directory)(
31 context=context).update(keys=["stats", "checks"])
32
33
34 @receiver(pre_delete, sender=Directory)
35 def handle_directory_delete(**kwargs):
36 revision_updater.get(Directory)(
37 context=kwargs["instance"].parent).update(keys=["stats", "checks"])
38
[end of pootle/apps/pootle_revision/receivers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_revision/receivers.py b/pootle/apps/pootle_revision/receivers.py
--- a/pootle/apps/pootle_revision/receivers.py
+++ b/pootle/apps/pootle_revision/receivers.py
@@ -13,6 +13,7 @@
from pootle_app.models import Directory
from pootle_data.models import StoreData
from pootle_store.models import Store
+from pootle_translationproject.models import TranslationProject
@receiver(post_save, sender=StoreData)
@@ -35,3 +36,9 @@
def handle_directory_delete(**kwargs):
revision_updater.get(Directory)(
context=kwargs["instance"].parent).update(keys=["stats", "checks"])
+
+
+@receiver(pre_delete, sender=TranslationProject)
+def handle_tp_delete(**kwargs):
+ revision_updater.get(Directory)(
+ context=kwargs["instance"].directory).update(keys=["stats", "checks"])
| {"golden_diff": "diff --git a/pootle/apps/pootle_revision/receivers.py b/pootle/apps/pootle_revision/receivers.py\n--- a/pootle/apps/pootle_revision/receivers.py\n+++ b/pootle/apps/pootle_revision/receivers.py\n@@ -13,6 +13,7 @@\n from pootle_app.models import Directory\n from pootle_data.models import StoreData\n from pootle_store.models import Store\n+from pootle_translationproject.models import TranslationProject\n \n \n @receiver(post_save, sender=StoreData)\n@@ -35,3 +36,9 @@\n def handle_directory_delete(**kwargs):\n revision_updater.get(Directory)(\n context=kwargs[\"instance\"].parent).update(keys=[\"stats\", \"checks\"])\n+\n+\n+@receiver(pre_delete, sender=TranslationProject)\n+def handle_tp_delete(**kwargs):\n+ revision_updater.get(Directory)(\n+ context=kwargs[\"instance\"].directory).update(keys=[\"stats\", \"checks\"])\n", "issue": "Delete a TP from an old style project and the Project page stays cached\n1. Create a new TP\r\n2. TP is available\r\n3. Delete TP\r\n4. Project page still shows project listed - though it should be gone\r\n5. Going to supposedly deleted TP and we get 404\r\n\r\nWe're not expiring cache when a TP is deleted.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.db.models.signals import post_save, pre_delete\nfrom django.dispatch import receiver\n\nfrom pootle.core.delegate import revision_updater\nfrom pootle_app.models import Directory\nfrom pootle_data.models import StoreData\nfrom pootle_store.models import Store\n\n\n@receiver(post_save, sender=StoreData)\ndef handle_storedata_save(**kwargs):\n revision_updater.get(Store)(\n context=kwargs[\"instance\"].store).update(keys=[\"stats\", \"checks\"])\n\n\n@receiver(post_save, sender=Directory)\ndef handle_directory_save(**kwargs):\n context = (\n kwargs[\"instance\"].parent\n if kwargs.get(\"created\")\n else kwargs[\"instance\"])\n revision_updater.get(Directory)(\n context=context).update(keys=[\"stats\", \"checks\"])\n\n\n@receiver(pre_delete, sender=Directory)\ndef handle_directory_delete(**kwargs):\n revision_updater.get(Directory)(\n context=kwargs[\"instance\"].parent).update(keys=[\"stats\", \"checks\"])\n", "path": "pootle/apps/pootle_revision/receivers.py"}]} | 970 | 216 |
gh_patches_debug_54708 | rasdani/github-patches | git_diff | qutebrowser__qutebrowser-4743 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Launching keyhint widget causes 100% usage of one CPU core
It has been like this for as long as I can remember, and it is reproducible on all of my hardware (pressing _g_ or _;_ is enough). I don't think this is intended behavior.
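One plausible culprit, sketched under the assumption that the keyhint delay timer is left in repeating mode: a `QTimer` whose `timeout` is connected to `show()` keeps firing every interval until stopped, while `setSingleShot(True)` makes it fire once per `start()`.

```python
import sys
from PyQt5.QtCore import QCoreApplication, QTimer

app = QCoreApplication(sys.argv)
timer = QTimer()
timer.setInterval(200)
timer.timeout.connect(lambda: print("would show the keyhint"))
timer.setSingleShot(True)  # comment this out and the timeout fires every 200 ms forever
timer.start()
QTimer.singleShot(700, app.quit)  # stop the demo event loop after 0.7 s
sys.exit(app.exec_())
```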
</issue>
<code>
[start of qutebrowser/misc/keyhintwidget.py]
1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
2
3 # Copyright 2016-2019 Ryan Roden-Corrent (rcorre) <[email protected]>
4 #
5 # This file is part of qutebrowser.
6 #
7 # qutebrowser is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # qutebrowser is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
19
20 """Small window that pops up to show hints for possible keystrings.
21
22 When a user inputs a key that forms a partial match, this shows a small window
23 with each possible completion of that keystring and the corresponding command.
24 It is intended to help discoverability of keybindings.
25 """
26
27 import html
28 import fnmatch
29 import re
30
31 from PyQt5.QtWidgets import QLabel, QSizePolicy
32 from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt
33
34 from qutebrowser.config import config
35 from qutebrowser.utils import utils, usertypes
36 from qutebrowser.misc import objects
37 from qutebrowser.keyinput import keyutils
38
39
40 class KeyHintView(QLabel):
41
42 """The view showing hints for key bindings based on the current key string.
43
44 Attributes:
45 _win_id: Window ID of parent.
46
47 Signals:
48 update_geometry: Emitted when this widget should be resized/positioned.
49 """
50
51 STYLESHEET = """
52 QLabel {
53 font: {{ conf.fonts.keyhint }};
54 color: {{ conf.colors.keyhint.fg }};
55 background-color: {{ conf.colors.keyhint.bg }};
56 padding: 6px;
57 {% if conf.statusbar.position == 'top' %}
58 border-bottom-right-radius: {{ conf.keyhint.radius }}px;
59 {% else %}
60 border-top-right-radius: {{ conf.keyhint.radius }}px;
61 {% endif %}
62 }
63 """
64 update_geometry = pyqtSignal()
65
66 def __init__(self, win_id, parent=None):
67 super().__init__(parent)
68 self.setTextFormat(Qt.RichText)
69 self._win_id = win_id
70 self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)
71 self.hide()
72 self._show_timer = usertypes.Timer(self, 'keyhint_show')
73 self._show_timer.timeout.connect(self.show)
74 config.set_register_stylesheet(self)
75
76 def __repr__(self):
77 return utils.get_repr(self, win_id=self._win_id)
78
79 def showEvent(self, e):
80 """Adjust the keyhint size when it's freshly shown."""
81 self.update_geometry.emit()
82 super().showEvent(e)
83
84 @pyqtSlot(str)
85 def update_keyhint(self, modename, prefix):
86 """Show hints for the given prefix (or hide if prefix is empty).
87
88 Args:
89 prefix: The current partial keystring.
90 """
91 countstr, prefix = re.fullmatch(r'(\d*)(.*)', prefix).groups()
92 if not prefix:
93 self._show_timer.stop()
94 self.hide()
95 return
96
97 def blacklisted(keychain):
98 return any(fnmatch.fnmatchcase(keychain, glob)
99 for glob in config.val.keyhint.blacklist)
100
101 def takes_count(cmdstr):
102 """Return true iff this command can take a count argument."""
103 cmdname = cmdstr.split(' ')[0]
104 cmd = objects.commands.get(cmdname)
105 return cmd and cmd.takes_count()
106
107 bindings_dict = config.key_instance.get_bindings_for(modename)
108 bindings = [(k, v) for (k, v) in sorted(bindings_dict.items())
109 if keyutils.KeySequence.parse(prefix).matches(k) and
110 not blacklisted(str(k)) and
111 (takes_count(v) or not countstr)]
112
113 if not bindings:
114 self._show_timer.stop()
115 return
116
117 # delay so a quickly typed keychain doesn't display hints
118 self._show_timer.setInterval(config.val.keyhint.delay)
119 self._show_timer.start()
120 suffix_color = html.escape(config.val.colors.keyhint.suffix.fg)
121
122 text = ''
123 for seq, cmd in bindings:
124 text += (
125 "<tr>"
126 "<td>{}</td>"
127 "<td style='color: {}'>{}</td>"
128 "<td style='padding-left: 2ex'>{}</td>"
129 "</tr>"
130 ).format(
131 html.escape(prefix),
132 suffix_color,
133 html.escape(str(seq)[len(prefix):]),
134 html.escape(cmd)
135 )
136 text = '<table>{}</table>'.format(text)
137
138 self.setText(text)
139 self.adjustSize()
140 self.update_geometry.emit()
141
[end of qutebrowser/misc/keyhintwidget.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qutebrowser/misc/keyhintwidget.py b/qutebrowser/misc/keyhintwidget.py
--- a/qutebrowser/misc/keyhintwidget.py
+++ b/qutebrowser/misc/keyhintwidget.py
@@ -71,6 +71,7 @@
self.hide()
self._show_timer = usertypes.Timer(self, 'keyhint_show')
self._show_timer.timeout.connect(self.show)
+ self._show_timer.setSingleShot(True)
config.set_register_stylesheet(self)
def __repr__(self):
| {"golden_diff": "diff --git a/qutebrowser/misc/keyhintwidget.py b/qutebrowser/misc/keyhintwidget.py\n--- a/qutebrowser/misc/keyhintwidget.py\n+++ b/qutebrowser/misc/keyhintwidget.py\n@@ -71,6 +71,7 @@\n self.hide()\n self._show_timer = usertypes.Timer(self, 'keyhint_show')\n self._show_timer.timeout.connect(self.show)\n+ self._show_timer.setSingleShot(True)\n config.set_register_stylesheet(self)\n \n def __repr__(self):\n", "issue": "Launching keyhint widget causes 100% usage of one CPU core\nThat's how it was for as long as I can remember, reproducible with all of my hardware (pressing _g_ or _;_ is enough). I don't think that's an intended behavior.\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2016-2019 Ryan Roden-Corrent (rcorre) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Small window that pops up to show hints for possible keystrings.\n\nWhen a user inputs a key that forms a partial match, this shows a small window\nwith each possible completion of that keystring and the corresponding command.\nIt is intended to help discoverability of keybindings.\n\"\"\"\n\nimport html\nimport fnmatch\nimport re\n\nfrom PyQt5.QtWidgets import QLabel, QSizePolicy\nfrom PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt\n\nfrom qutebrowser.config import config\nfrom qutebrowser.utils import utils, usertypes\nfrom qutebrowser.misc import objects\nfrom qutebrowser.keyinput import keyutils\n\n\nclass KeyHintView(QLabel):\n\n \"\"\"The view showing hints for key bindings based on the current key string.\n\n Attributes:\n _win_id: Window ID of parent.\n\n Signals:\n update_geometry: Emitted when this widget should be resized/positioned.\n \"\"\"\n\n STYLESHEET = \"\"\"\n QLabel {\n font: {{ conf.fonts.keyhint }};\n color: {{ conf.colors.keyhint.fg }};\n background-color: {{ conf.colors.keyhint.bg }};\n padding: 6px;\n {% if conf.statusbar.position == 'top' %}\n border-bottom-right-radius: {{ conf.keyhint.radius }}px;\n {% else %}\n border-top-right-radius: {{ conf.keyhint.radius }}px;\n {% endif %}\n }\n \"\"\"\n update_geometry = pyqtSignal()\n\n def __init__(self, win_id, parent=None):\n super().__init__(parent)\n self.setTextFormat(Qt.RichText)\n self._win_id = win_id\n self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Minimum)\n self.hide()\n self._show_timer = usertypes.Timer(self, 'keyhint_show')\n self._show_timer.timeout.connect(self.show)\n config.set_register_stylesheet(self)\n\n def __repr__(self):\n return utils.get_repr(self, win_id=self._win_id)\n\n def showEvent(self, e):\n \"\"\"Adjust the keyhint size when it's freshly shown.\"\"\"\n self.update_geometry.emit()\n super().showEvent(e)\n\n @pyqtSlot(str)\n def update_keyhint(self, modename, prefix):\n \"\"\"Show hints for the given prefix (or hide if prefix is empty).\n\n Args:\n prefix: The current partial keystring.\n \"\"\"\n 
countstr, prefix = re.fullmatch(r'(\\d*)(.*)', prefix).groups()\n if not prefix:\n self._show_timer.stop()\n self.hide()\n return\n\n def blacklisted(keychain):\n return any(fnmatch.fnmatchcase(keychain, glob)\n for glob in config.val.keyhint.blacklist)\n\n def takes_count(cmdstr):\n \"\"\"Return true iff this command can take a count argument.\"\"\"\n cmdname = cmdstr.split(' ')[0]\n cmd = objects.commands.get(cmdname)\n return cmd and cmd.takes_count()\n\n bindings_dict = config.key_instance.get_bindings_for(modename)\n bindings = [(k, v) for (k, v) in sorted(bindings_dict.items())\n if keyutils.KeySequence.parse(prefix).matches(k) and\n not blacklisted(str(k)) and\n (takes_count(v) or not countstr)]\n\n if not bindings:\n self._show_timer.stop()\n return\n\n # delay so a quickly typed keychain doesn't display hints\n self._show_timer.setInterval(config.val.keyhint.delay)\n self._show_timer.start()\n suffix_color = html.escape(config.val.colors.keyhint.suffix.fg)\n\n text = ''\n for seq, cmd in bindings:\n text += (\n \"<tr>\"\n \"<td>{}</td>\"\n \"<td style='color: {}'>{}</td>\"\n \"<td style='padding-left: 2ex'>{}</td>\"\n \"</tr>\"\n ).format(\n html.escape(prefix),\n suffix_color,\n html.escape(str(seq)[len(prefix):]),\n html.escape(cmd)\n )\n text = '<table>{}</table>'.format(text)\n\n self.setText(text)\n self.adjustSize()\n self.update_geometry.emit()\n", "path": "qutebrowser/misc/keyhintwidget.py"}]} | 1,999 | 114 |
gh_patches_debug_11218 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-2984 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FL failing since at least 2019-06-03
FL has been failing since 2019-06-03
Based on automated runs it appears that FL has not run successfully in 2 days (2019-06-03).
```
04:01:17 CRITICAL pupa: Session(s) 2009B, 2003C, 2003B, 2002E, 2004A, 2012 Org., 2007D, 1998 Org, 2000A (Jan.), 2007C, 2007A, 2000A (Dec.), 2006 Org., 2000 Org., 2001C, 2005B, 2002D, 2008 Org., 2018 Org., 2003A, 2010 Org., 2004 Org., 2003D, 2007B, 2009A, 2001B, 2014 Org., 2002 Org., 2016 Org., 2010C, 2003E were reported by Florida.get_session_list() but were not found in Florida.legislative_sessions or Florida.ignored_scraped_sessions.
loaded Open States pupa settings...
fl (scrape, import)
bills: {}
```
Visit http://bobsled.openstates.org for more info.
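The log lists sessions that `get_session_list()` returns but the jurisdiction never declares; a sketch of the usual remedy, assuming they are historical sessions that should simply be ignored rather than scraped:

```python
ignored_scraped_sessions = [
    *(str(each) for each in range(1997, 2010)),
    '2010', '2010A', '2010O',
    '2012O', '2014O', '2016O', '2018O',
    # ...plus every name from the log above, e.g.:
    '2018 Org.', '2016 Org.', '2014 Org.', '2012 Org.', '2010 Org.',
    '2010C', '2009B', '2009A', '2008 Org.',
]
```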
</issue>
<code>
[start of openstates/fl/__init__.py]
1 # encoding=utf-8
2 import logging
3 from pupa.scrape import Jurisdiction, Organization
4 from .bills import FlBillScraper
5 from .people import FlPersonScraper
6 # from .committees import FlCommitteeScraper
7 # from .events import FlEventScraper
8 from openstates.utils import url_xpath
9
10 logging.getLogger(__name__).addHandler(logging.NullHandler())
11
12
13 class Florida(Jurisdiction):
14 division_id = "ocd-division/country:us/state:fl"
15 classification = "government"
16 name = "Florida"
17 url = "http://myflorida.com"
18
19 scrapers = {
20 "bills": FlBillScraper,
21 "people": FlPersonScraper,
22 # "committees": FlCommitteeScraper,
23 # "events": FlEventScraper,
24 }
25 legislative_sessions = [
26 {'name': '2011 Regular Session', 'identifier': '2011',
27 'classification': 'primary'},
28 {'name': '2012 Regular Session', 'identifier': '2012',
29 'classification': 'primary'},
30 {'name': '2012 Extraordinary Apportionment Session', 'identifier': '2012B',
31 'classification': 'special'},
32 {'name': '2013 Regular Session', 'identifier': '2013',
33 'classification': 'primary'},
34 {'name': '2014 Regular Session', 'identifier': '2014',
35 'classification': 'primary'},
36 {'name': '2014 Special Session A',
37 'identifier': '2014A', 'classification': 'special'},
38 # data for the below
39 {'name': '2015 Regular Session', 'identifier': '2015',
40 'classification': 'primary'},
41 {'name': '2015 Special Session A',
42 'identifier': '2015A', 'classification': 'special'},
43 {'name': '2015 Special Session B',
44 'identifier': '2015B', 'classification': 'special'},
45 {'name': '2015 Special Session C',
46 'identifier': '2015C', 'classification': 'special'},
47 {'name': '2016 Regular Session', 'identifier': '2016',
48 'classification': 'primary'},
49 {'name': '2017 Regular Session', 'identifier': '2017', 'classification': 'primary',
50 'start_date': '2017-03-07', 'end_date': '2017-05-05'},
51 {'name': '2017 Special Session A',
52 'identifier': '2017A', 'classification': 'special'},
53 {'name': '2018 Regular Session', 'identifier': '2018', 'classification': 'primary',
54 'start_date': '2018-01-08', 'end_date': '2018-03-09'},
55 {'name': '2019 Regular Session', 'identifier': '2019', 'classification': 'primary',
56 'start_date': '2019-03-05', 'end_date': '2019-05-03'},
57 ]
58 ignored_scraped_sessions = [
59 *(str(each) for each in range(1997, 2010)),
60 '2010', '2010A', '2010O',
61 '2012O',
62 '2014O',
63 '2016O',
64 '2018O',
65 ]
66
67 def get_organizations(self):
68 legis = Organization(name="Florida Legislature",
69 classification="legislature")
70
71 upper = Organization(
72 'Florida Senate', classification='upper', parent_id=legis._id)
73 lower = Organization('Florida House of Representatives', classification='lower',
74 parent_id=legis._id)
75
76 yield legis
77 yield upper
78 yield lower
79
80 def get_session_list(self):
81 return url_xpath('http://flsenate.gov', '//option/text()')
82
[end of openstates/fl/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/fl/__init__.py b/openstates/fl/__init__.py
--- a/openstates/fl/__init__.py
+++ b/openstates/fl/__init__.py
@@ -62,6 +62,37 @@
'2014O',
'2016O',
'2018O',
+ '2018 Org.',
+ '2016 Org.',
+ '2014 Org.',
+ '2012 Org.',
+ '2010 Org.',
+ '2010C',
+ '2009B',
+ '2009A',
+ '2008 Org.',
+ '2007D',
+ '2007C',
+ '2007B',
+ '2007A',
+ '2006 Org.',
+ '2005B',
+ '2004A',
+ '2004 Org.',
+ '2003E',
+ '2003D',
+ '2003C',
+ '2003B',
+ '2003A',
+ '2002E',
+ '2002D',
+ '2002 Org.',
+ '2001C',
+ '2001B',
+ '2000A (Jan.)',
+ '2000A (Dec.)',
+ '2000 Org.',
+ '1998 Org',
]
def get_organizations(self):
| {"golden_diff": "diff --git a/openstates/fl/__init__.py b/openstates/fl/__init__.py\n--- a/openstates/fl/__init__.py\n+++ b/openstates/fl/__init__.py\n@@ -62,6 +62,37 @@\n '2014O',\n '2016O',\n '2018O',\n+ '2018 Org.',\n+ '2016 Org.',\n+ '2014 Org.',\n+ '2012 Org.',\n+ '2010 Org.',\n+ '2010C',\n+ '2009B',\n+ '2009A',\n+ '2008 Org.',\n+ '2007D',\n+ '2007C',\n+ '2007B',\n+ '2007A',\n+ '2006 Org.',\n+ '2005B',\n+ '2004A',\n+ '2004 Org.',\n+ '2003E',\n+ '2003D',\n+ '2003C',\n+ '2003B',\n+ '2003A',\n+ '2002E',\n+ '2002D',\n+ '2002 Org.',\n+ '2001C',\n+ '2001B',\n+ '2000A (Jan.)',\n+ '2000A (Dec.)',\n+ '2000 Org.',\n+ '1998 Org',\n ]\n \n def get_organizations(self):\n", "issue": "FL failing since at least 2019-06-03\nFL has been failing since 2019-06-03\n\nBased on automated runs it appears that FL has not run successfully in 2 days (2019-06-03).\n\n\n```\n 04:01:17 CRITICAL pupa: Session(s) 2009B, 2003C, 2003B, 2002E, 2004A, 2012 Org., 2007D, 1998 Org, 2000A (Jan.), 2007C, 2007A, 2000A (Dec.), 2006 Org., 2000 Org., 2001C, 2005B, 2002D, 2008 Org., 2018 Org., 2003A, 2010 Org., 2004 Org., 2003D, 2007B, 2009A, 2001B, 2014 Org., 2002 Org., 2016 Org., 2010C, 2003E were reported by Florida.get_session_list() but were not found in Florida.legislative_sessions or Florida.ignored_scraped_sessions.\nloaded Open States pupa settings...\nfl (scrape, import)\n bills: {}\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "# encoding=utf-8\nimport logging\nfrom pupa.scrape import Jurisdiction, Organization\nfrom .bills import FlBillScraper\nfrom .people import FlPersonScraper\n# from .committees import FlCommitteeScraper\n# from .events import FlEventScraper\nfrom openstates.utils import url_xpath\n\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n\n\nclass Florida(Jurisdiction):\n division_id = \"ocd-division/country:us/state:fl\"\n classification = \"government\"\n name = \"Florida\"\n url = \"http://myflorida.com\"\n\n scrapers = {\n \"bills\": FlBillScraper,\n \"people\": FlPersonScraper,\n # \"committees\": FlCommitteeScraper,\n # \"events\": FlEventScraper,\n }\n legislative_sessions = [\n {'name': '2011 Regular Session', 'identifier': '2011',\n 'classification': 'primary'},\n {'name': '2012 Regular Session', 'identifier': '2012',\n 'classification': 'primary'},\n {'name': '2012 Extraordinary Apportionment Session', 'identifier': '2012B',\n 'classification': 'special'},\n {'name': '2013 Regular Session', 'identifier': '2013',\n 'classification': 'primary'},\n {'name': '2014 Regular Session', 'identifier': '2014',\n 'classification': 'primary'},\n {'name': '2014 Special Session A',\n 'identifier': '2014A', 'classification': 'special'},\n # data for the below\n {'name': '2015 Regular Session', 'identifier': '2015',\n 'classification': 'primary'},\n {'name': '2015 Special Session A',\n 'identifier': '2015A', 'classification': 'special'},\n {'name': '2015 Special Session B',\n 'identifier': '2015B', 'classification': 'special'},\n {'name': '2015 Special Session C',\n 'identifier': '2015C', 'classification': 'special'},\n {'name': '2016 Regular Session', 'identifier': '2016',\n 'classification': 'primary'},\n {'name': '2017 Regular Session', 'identifier': '2017', 'classification': 'primary',\n 'start_date': '2017-03-07', 'end_date': '2017-05-05'},\n {'name': '2017 Special Session A',\n 'identifier': '2017A', 'classification': 'special'},\n {'name': '2018 Regular Session', 'identifier': '2018', 'classification': 'primary',\n 'start_date': 
'2018-01-08', 'end_date': '2018-03-09'},\n {'name': '2019 Regular Session', 'identifier': '2019', 'classification': 'primary',\n 'start_date': '2019-03-05', 'end_date': '2019-05-03'},\n ]\n ignored_scraped_sessions = [\n *(str(each) for each in range(1997, 2010)),\n '2010', '2010A', '2010O',\n '2012O',\n '2014O',\n '2016O',\n '2018O',\n ]\n\n def get_organizations(self):\n legis = Organization(name=\"Florida Legislature\",\n classification=\"legislature\")\n\n upper = Organization(\n 'Florida Senate', classification='upper', parent_id=legis._id)\n lower = Organization('Florida House of Representatives', classification='lower',\n parent_id=legis._id)\n\n yield legis\n yield upper\n yield lower\n\n def get_session_list(self):\n return url_xpath('http://flsenate.gov', '//option/text()')\n", "path": "openstates/fl/__init__.py"}]} | 1,973 | 373 |
gh_patches_debug_36606 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-3442 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TW production parser down
## Description
This is an automatic error report generated for Taiwan (TW).
Issues:
- No recent data found for `production` parser
## Suggestions
- Try running the parser locally using the command `poetry run test_parser TW production` (a minimal payload-inspection sketch follows below)
- <a href="https://kibana.electricitymap.org/app/kibana#/discover/10af54f0-0c4a-11e9-85c1-1d63df8c862c?_g=(refreshInterval:('$$hashKey':'object:232',display:'5%20minutes',pause:!f,section:2,value:300000),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(message,extra.key,level),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!t,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:level,negate:!f,params:(query:ERROR,type:phrase),type:phrase,value:ERROR),query:(match:(level:(query:ERROR,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:extra.key,negate:!f,params:(query:TW,type:phrase),type:phrase,value:TW),query:(match:(extra.key:(query:TW,type:phrase))))),index:'96f67170-0c49-11e9-85c1-1d63df8c862c',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',desc))">Explore the runtime logs</a>
You can see an overview of all parser issues [here](https://github.com/tmrowco/electricitymap-contrib/wiki/Parser-issues).
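For the payload itself, a minimal inspection sketch along these lines can confirm whether the upstream response changed shape; the endpoint is the one already used in `parsers/TW.py`, and checking the per-row column count first is an assumption about the most likely failure mode:

```
import requests

# Debugging sketch only; the endpoint is the one parsers/TW.py already fetches.
URL = 'http://www.taipower.com.tw/d006/loadGraph/loadGraph/data/genary.txt'

payload = requests.get(URL).json()
rows = payload['aaData']

# When this parser goes quiet, the usual suspect is a changed column layout
# upstream, so inspect the row width and a few sample rows first.
print('dump date:', payload[''])
print('columns per row:', len(rows[0]))
for row in rows[:3]:
    print(row)
```

If the column count no longer matches the names that `fetch_production` assigns, the parser's column list needs updating; otherwise the shifted fields coerce to NaN and no recent data is reported.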
</issue>
<code>
[start of parsers/TW.py]
1 #!/usr/bin/env python3
2 import arrow
3 import requests
4 import pandas
5 import dateutil
6
7
8 def fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None) -> dict:
9 if target_datetime:
10 raise NotImplementedError('This parser is not yet able to parse past dates')
11
12 url = 'http://www.taipower.com.tw/d006/loadGraph/loadGraph/data/genary.txt'
13 s = session or requests.Session()
14 response = s.get(url)
15 data = response.json()
16
17 dumpDate = data['']
18 prodData = data['aaData']
19
20 tz = 'Asia/Taipei'
21 dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))
22
23 objData = pandas.DataFrame(prodData)
24
25 objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',
26 'additional']
27
28 objData['fueltype'] = objData.fueltype.str.split('(').str[1]
29 objData['fueltype'] = objData.fueltype.str.split(')').str[0]
30 objData.drop('additional', axis=1, inplace=True)
31 objData.drop('percentage', axis=1, inplace=True)
32
33 objData['capacity'] = pandas.to_numeric(objData['capacity'], errors='coerce')
34 objData['output'] = pandas.to_numeric(objData['output'], errors='coerce')
35 production = pandas.DataFrame(objData.groupby('fueltype').sum())
36 production.columns = ['capacity', 'output']
37
38 coal_capacity = production.loc['Coal'].capacity + production.loc['IPP-Coal'].capacity
39 gas_capacity = production.loc['LNG'].capacity + production.loc['IPP-LNG'].capacity
40 oil_capacity = production.loc['Oil'].capacity + production.loc['Diesel'].capacity
41
42 coal_production = production.loc['Coal'].output + production.loc['IPP-Coal'].output
43 gas_production = production.loc['LNG'].output + production.loc['IPP-LNG'].output
44 oil_production = production.loc['Oil'].output + production.loc['Diesel'].output
45
46 # For storage, note that load will be negative, and generation positive.
47 # We require the opposite
48
49 returndata = {
50 'zoneKey': zone_key,
51 'datetime': dumpDate.datetime,
52 'production': {
53 'coal': coal_production,
54 'gas': gas_production,
55 'oil': oil_production,
56 'hydro': production.loc['Hydro'].output,
57 'nuclear': production.loc['Nuclear'].output,
58 'solar': production.loc['Solar'].output,
59 'wind': production.loc['Wind'].output,
60 'unknown': production.loc['Co-Gen'].output
61 },
62 'capacity': {
63 'coal': coal_capacity,
64 'gas': gas_capacity,
65 'oil': oil_capacity,
66 'hydro': production.loc['Hydro'].capacity,
67 'hydro storage':production.loc['Pumping Gen'].capacity,
68 'nuclear': production.loc['Nuclear'].capacity,
69 'solar': production.loc['Solar'].capacity,
70 'wind': production.loc['Wind'].capacity,
71 'unknown': production.loc['Co-Gen'].capacity
72 },
73 'storage': {
74 'hydro': -1 * production.loc['Pumping Load'].output - production.loc['Pumping Gen'].output
75 },
76 'source': 'taipower.com.tw'
77 }
78
79 return returndata
80
81
82 if __name__ == '__main__':
83 print(fetch_production())
84
[end of parsers/TW.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/TW.py b/parsers/TW.py
--- a/parsers/TW.py
+++ b/parsers/TW.py
@@ -1,8 +1,8 @@
#!/usr/bin/env python3
import arrow
-import requests
-import pandas
import dateutil
+import pandas as pd
+import requests
def fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None) -> dict:
@@ -20,21 +20,27 @@
tz = 'Asia/Taipei'
dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))
- objData = pandas.DataFrame(prodData)
+ objData = pd.DataFrame(prodData)
- objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',
- 'additional']
+ columns = ['fueltype', 'additional_1', 'name', 'capacity', 'output', 'percentage', 'additional_2']
+ assert len(objData.iloc[0]) == len(columns), "number of input columns changed"
+ objData.columns = columns
objData['fueltype'] = objData.fueltype.str.split('(').str[1]
objData['fueltype'] = objData.fueltype.str.split(')').str[0]
- objData.drop('additional', axis=1, inplace=True)
- objData.drop('percentage', axis=1, inplace=True)
+ objData.loc[:,['capacity', 'output']] = objData[['capacity', 'output']].apply(pd.to_numeric, errors='coerce')
+ assert not objData.capacity.isna().all(), "capacity data is entirely NaN - input column order may have changed"
+ assert not objData.output.isna().all(), "output data is entirely NaN - input column order may have changed"
- objData['capacity'] = pandas.to_numeric(objData['capacity'], errors='coerce')
- objData['output'] = pandas.to_numeric(objData['output'], errors='coerce')
- production = pandas.DataFrame(objData.groupby('fueltype').sum())
+ objData.drop(columns=['additional_1', 'name', 'additional_2', 'percentage'], axis=1, inplace=True)
+ # summing because items in returned object are for each power plant and operational units
+ production = pd.DataFrame(objData.groupby('fueltype').sum())
production.columns = ['capacity', 'output']
+ # check output values coincide with total capacity by fuel type
+ check_values = production.output <= production.capacity
+ assert check_values.loc[~check_values.index.isin(["Co-Gen"])].all(), "output > capacity" # HACK: Co-Gen capacity is underestimated
+
coal_capacity = production.loc['Coal'].capacity + production.loc['IPP-Coal'].capacity
gas_capacity = production.loc['LNG'].capacity + production.loc['IPP-LNG'].capacity
oil_capacity = production.loc['Oil'].capacity + production.loc['Diesel'].capacity
| {"golden_diff": "diff --git a/parsers/TW.py b/parsers/TW.py\n--- a/parsers/TW.py\n+++ b/parsers/TW.py\n@@ -1,8 +1,8 @@\n #!/usr/bin/env python3\n import arrow\n-import requests\n-import pandas\n import dateutil\n+import pandas as pd\n+import requests\n \n \n def fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None) -> dict:\n@@ -20,21 +20,27 @@\n tz = 'Asia/Taipei'\n dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))\n \n- objData = pandas.DataFrame(prodData)\n+ objData = pd.DataFrame(prodData)\n \n- objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',\n- 'additional']\n+ columns = ['fueltype', 'additional_1', 'name', 'capacity', 'output', 'percentage', 'additional_2']\n+ assert len(objData.iloc[0]) == len(columns), \"number of input columns changed\"\n+ objData.columns = columns\n \n objData['fueltype'] = objData.fueltype.str.split('(').str[1]\n objData['fueltype'] = objData.fueltype.str.split(')').str[0]\n- objData.drop('additional', axis=1, inplace=True)\n- objData.drop('percentage', axis=1, inplace=True)\n+ objData.loc[:,['capacity', 'output']] = objData[['capacity', 'output']].apply(pd.to_numeric, errors='coerce')\n+ assert not objData.capacity.isna().all(), \"capacity data is entirely NaN - input column order may have changed\"\n+ assert not objData.output.isna().all(), \"output data is entirely NaN - input column order may have changed\"\n \n- objData['capacity'] = pandas.to_numeric(objData['capacity'], errors='coerce')\n- objData['output'] = pandas.to_numeric(objData['output'], errors='coerce')\n- production = pandas.DataFrame(objData.groupby('fueltype').sum())\n+ objData.drop(columns=['additional_1', 'name', 'additional_2', 'percentage'], axis=1, inplace=True)\n+ # summing because items in returned object are for each power plant and operational units\n+ production = pd.DataFrame(objData.groupby('fueltype').sum())\n production.columns = ['capacity', 'output']\n \n+ # check output values coincide with total capacity by fuel type\n+ check_values = production.output <= production.capacity\n+ assert check_values.loc[~check_values.index.isin([\"Co-Gen\"])].all(), \"output > capacity\" # HACK: Co-Gen capacity is underestimated\n+\n coal_capacity = production.loc['Coal'].capacity + production.loc['IPP-Coal'].capacity\n gas_capacity = production.loc['LNG'].capacity + production.loc['IPP-LNG'].capacity\n oil_capacity = production.loc['Oil'].capacity + production.loc['Diesel'].capacity\n", "issue": "TW production parser down\n## Description\n\nThis is an automatic error report generated for Taiwan (TW).\n\nIssues:\n- No recent data found for `production` parser\n\n## Suggestions\n- Try running the parser locally using the command `poetry run test_parser TW production`\n- <a 
href=\"https://kibana.electricitymap.org/app/kibana#/discover/10af54f0-0c4a-11e9-85c1-1d63df8c862c?_g=(refreshInterval:('$$hashKey':'object:232',display:'5%20minutes',pause:!f,section:2,value:300000),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(message,extra.key,level),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!t,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:level,negate:!f,params:(query:ERROR,type:phrase),type:phrase,value:ERROR),query:(match:(level:(query:ERROR,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'96f67170-0c49-11e9-85c1-1d63df8c862c',key:extra.key,negate:!f,params:(query:TW,type:phrase),type:phrase,value:TW),query:(match:(extra.key:(query:TW,type:phrase))))),index:'96f67170-0c49-11e9-85c1-1d63df8c862c',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',desc))\">Explore the runtime logs</a>\n\nYou can see an overview of all parser issues [here](https://github.com/tmrowco/electricitymap-contrib/wiki/Parser-issues).\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport arrow\nimport requests\nimport pandas\nimport dateutil\n\n\ndef fetch_production(zone_key='TW', session=None, target_datetime=None, logger=None) -> dict:\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n url = 'http://www.taipower.com.tw/d006/loadGraph/loadGraph/data/genary.txt'\n s = session or requests.Session()\n response = s.get(url)\n data = response.json()\n\n dumpDate = data['']\n prodData = data['aaData']\n\n tz = 'Asia/Taipei'\n dumpDate = arrow.get(dumpDate, 'YYYY-MM-DD HH:mm').replace(tzinfo=dateutil.tz.gettz(tz))\n\n objData = pandas.DataFrame(prodData)\n\n objData.columns = ['fueltype', 'name', 'capacity', 'output', 'percentage',\n 'additional']\n\n objData['fueltype'] = objData.fueltype.str.split('(').str[1]\n objData['fueltype'] = objData.fueltype.str.split(')').str[0]\n objData.drop('additional', axis=1, inplace=True)\n objData.drop('percentage', axis=1, inplace=True)\n\n objData['capacity'] = pandas.to_numeric(objData['capacity'], errors='coerce')\n objData['output'] = pandas.to_numeric(objData['output'], errors='coerce')\n production = pandas.DataFrame(objData.groupby('fueltype').sum())\n production.columns = ['capacity', 'output']\n\n coal_capacity = production.loc['Coal'].capacity + production.loc['IPP-Coal'].capacity\n gas_capacity = production.loc['LNG'].capacity + production.loc['IPP-LNG'].capacity\n oil_capacity = production.loc['Oil'].capacity + production.loc['Diesel'].capacity\n\n coal_production = production.loc['Coal'].output + production.loc['IPP-Coal'].output\n gas_production = production.loc['LNG'].output + production.loc['IPP-LNG'].output\n oil_production = production.loc['Oil'].output + production.loc['Diesel'].output\n\n # For storage, note that load will be negative, and generation positive.\n # We require the opposite\n\n returndata = {\n 'zoneKey': zone_key,\n 'datetime': dumpDate.datetime,\n 'production': {\n 'coal': coal_production,\n 'gas': gas_production,\n 'oil': oil_production,\n 'hydro': production.loc['Hydro'].output,\n 'nuclear': production.loc['Nuclear'].output,\n 'solar': production.loc['Solar'].output,\n 'wind': production.loc['Wind'].output,\n 'unknown': production.loc['Co-Gen'].output\n },\n 'capacity': {\n 'coal': coal_capacity,\n 'gas': gas_capacity,\n 'oil': oil_capacity,\n 'hydro': production.loc['Hydro'].capacity,\n 'hydro storage':production.loc['Pumping Gen'].capacity,\n 'nuclear': 
production.loc['Nuclear'].capacity,\n 'solar': production.loc['Solar'].capacity,\n 'wind': production.loc['Wind'].capacity,\n 'unknown': production.loc['Co-Gen'].capacity\n },\n 'storage': {\n 'hydro': -1 * production.loc['Pumping Load'].output - production.loc['Pumping Gen'].output\n },\n 'source': 'taipower.com.tw'\n }\n\n return returndata\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/TW.py"}]} | 1,931 | 666 |
gh_patches_debug_7033 | rasdani/github-patches | git_diff | akvo__akvo-rsr-1942 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Organisations list gives timeout
## Test plan
The organisations list should load without timing out. Since this only happened on Live, it is hard to debug.
## Sentry
See http://sentry.support.akvo-ops.org/rsr/live/group/742/
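One plausible culprit is the combined annotation in the directory view below: several `Count()` aggregations over different multi-valued relations in a single `annotate()` call make the generated SQL join employees, projects and project updates at the same time, so the intermediate row count multiplies per organisation before `DISTINCT` can shrink it again. A reduced sketch of that shape, together with one possible mitigation (an illustration only, not necessarily the fix that was shipped; model and relation names are taken from the view):

```
from django.db.models import Count

from akvo.rsr.models import Organisation

# The expensive shape: three aggregations over three different multi-valued
# relations in one annotate() call, each adding another JOIN to the query.
slow = Organisation.objects.annotate(
    num_employees=Count('employees', distinct=True),
    num_projects=Count('projects', distinct=True),
    num_updates=Count('projects__project_updates', distinct=True),
)

# Illustrative mitigation: aggregate one relation per query so each query
# carries a single join, then attach the numbers in Python for the one page
# of organisations actually being rendered.
def attach_project_counts(page_organisations):
    ids = [org.pk for org in page_organisations]
    counts = dict(
        Organisation.objects.filter(pk__in=ids)
        .annotate(n=Count('projects', distinct=True))
        .values_list('pk', 'n')
    )
    for org in page_organisations:
        org.num_projects = counts.get(org.pk, 0)
    return page_organisations
```

Splitting the aggregations keeps each query linear in the number of related rows, at the cost of a few extra round trips per page.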
</issue>
<code>
[start of akvo/rsr/views/organisation.py]
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the
6 Akvo RSR module. For additional details on the GNU license please
7 see < http://www.gnu.org/licenses/agpl.html >.
8 """
9
10 from django.db.models import Prefetch
11 from django.db.models import Count
12 from django.shortcuts import get_object_or_404, render
13
14 from ..filters import location_choices, OrganisationFilter, remove_empty_querydict_items
15 from ..models import Employment, Organisation, Project, ProjectUpdate
16 from ...utils import pagination, filter_query_string
17 from .utils import apply_keywords, org_projects, show_filter_class
18
19 ###############################################################################
20 # Organisation directory
21 ###############################################################################
22
23
24 def _public_projects():
25 """Return all public projects."""
26 return Project.objects.public().published().select_related('partners').order_by('-id')
27
28
29 def _page_organisations(page):
30     """Dig out the list of organisations to use."""
31 projects = org_projects(page.organisation) if page.partner_projects else _public_projects()
32 keyword_projects = apply_keywords(page, projects)
33 return keyword_projects.all_partners()
34
35
36 def _organisation_directory_coll(request):
37 """Dig out and pass correct organisations to the view."""
38 page = request.rsr_page
39 if not page:
40 return Organisation.objects.all()
41 return _page_organisations(page)
42
43
44 def directory(request):
45 """The Organisation list view."""
46 qs = remove_empty_querydict_items(request.GET)
47
48 # Set show_filters to "in" if any filter is selected
49 filter_class = show_filter_class(qs, ['location', ])
50
51 # Yank Organisation collection
52 all_organisations = _organisation_directory_coll(request)
53
54 # Easter egg feature
55 creator_organisations = request.GET.get('creator', False)
56 if creator_organisations:
57 all_organisations = all_organisations.filter(can_create_projects=True)
58
59 f = OrganisationFilter(qs, queryset=all_organisations)
60
61 # Change filter options further when on an Akvo Page
62 if request.rsr_page:
63 # Filter location filter list to only populated locations
64 f.filters['location'].extra['choices'] = location_choices(all_organisations)
65
66 # Build page
67 page = request.GET.get('page')
68 page, paginator, page_range = pagination(page, f.qs.distinct(), 10)
69
70 # Get organisations to be displayed on the map
71 if request.rsr_page and request.rsr_page.all_maps:
72 map_orgs = all_organisations
73 else:
74 map_orgs = page.object_list
75 map_orgs = map_orgs
76
77 # Get related objects of page at once
78 page.object_list = page.object_list.select_related(
79 'primary_location__country',
80 ).annotate(
81 num_employees=Count('employees', distinct=True),
82 num_projects=Count('projects', distinct=True),
83 num_updates=Count('projects__project_updates', distinct=True),
84 )
85
86 return render(request, 'organisation_directory.html', {
87 'orgs_count': f.qs.distinct().count(),
88 'filter': f,
89 'page': page,
90 'paginator': paginator,
91 'page_range': page_range,
92 'show_filters': filter_class,
93 'q': filter_query_string(qs),
94 'map_organisations': map_orgs,
95 })
96
97
98 ###############################################################################
99 # Organisation main
100 ###############################################################################
101
102
103 def main(request, organisation_id):
104 """The organisation main view."""
105 return render(request, 'organisation_main.html', {
106 'organisation': get_object_or_404(Organisation, pk=organisation_id)})
107
[end of akvo/rsr/views/organisation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/views/organisation.py b/akvo/rsr/views/organisation.py
--- a/akvo/rsr/views/organisation.py
+++ b/akvo/rsr/views/organisation.py
@@ -77,10 +77,6 @@
# Get related objects of page at once
page.object_list = page.object_list.select_related(
'primary_location__country',
- ).annotate(
- num_employees=Count('employees', distinct=True),
- num_projects=Count('projects', distinct=True),
- num_updates=Count('projects__project_updates', distinct=True),
)
return render(request, 'organisation_directory.html', {
| {"golden_diff": "diff --git a/akvo/rsr/views/organisation.py b/akvo/rsr/views/organisation.py\n--- a/akvo/rsr/views/organisation.py\n+++ b/akvo/rsr/views/organisation.py\n@@ -77,10 +77,6 @@\n # Get related objects of page at once\n page.object_list = page.object_list.select_related(\n 'primary_location__country',\n- ).annotate(\n- num_employees=Count('employees', distinct=True),\n- num_projects=Count('projects', distinct=True),\n- num_updates=Count('projects__project_updates', distinct=True),\n )\n \n return render(request, 'organisation_directory.html', {\n", "issue": "Organisations list gives timeout\n## Test plan\n\nThe organisations list should not give a timeout. Since this only happened on Live, it is hard to debug.\n## Sentry\n\nSee http://sentry.support.akvo-ops.org/rsr/live/group/742/\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.db.models import Prefetch\nfrom django.db.models import Count\nfrom django.shortcuts import get_object_or_404, render\n\nfrom ..filters import location_choices, OrganisationFilter, remove_empty_querydict_items\nfrom ..models import Employment, Organisation, Project, ProjectUpdate\nfrom ...utils import pagination, filter_query_string\nfrom .utils import apply_keywords, org_projects, show_filter_class\n\n###############################################################################\n# Organisation directory\n###############################################################################\n\n\ndef _public_projects():\n \"\"\"Return all public projects.\"\"\"\n return Project.objects.public().published().select_related('partners').order_by('-id')\n\n\ndef _page_organisations(page):\n \"\"\"Dig out the list or organisations to use.\"\"\"\n projects = org_projects(page.organisation) if page.partner_projects else _public_projects()\n keyword_projects = apply_keywords(page, projects)\n return keyword_projects.all_partners()\n\n\ndef _organisation_directory_coll(request):\n \"\"\"Dig out and pass correct organisations to the view.\"\"\"\n page = request.rsr_page\n if not page:\n return Organisation.objects.all()\n return _page_organisations(page)\n\n\ndef directory(request):\n \"\"\"The Organisation list view.\"\"\"\n qs = remove_empty_querydict_items(request.GET)\n\n # Set show_filters to \"in\" if any filter is selected\n filter_class = show_filter_class(qs, ['location', ])\n\n # Yank Organisation collection\n all_organisations = _organisation_directory_coll(request)\n\n # Easter egg feature\n creator_organisations = request.GET.get('creator', False)\n if creator_organisations:\n all_organisations = all_organisations.filter(can_create_projects=True)\n\n f = OrganisationFilter(qs, queryset=all_organisations)\n\n # Change filter options further when on an Akvo Page\n if request.rsr_page:\n # Filter location filter list to only populated locations\n f.filters['location'].extra['choices'] = location_choices(all_organisations)\n\n # Build page\n page = request.GET.get('page')\n page, paginator, page_range = pagination(page, f.qs.distinct(), 10)\n\n # Get organisations to be displayed on the map\n if request.rsr_page and request.rsr_page.all_maps:\n map_orgs = all_organisations\n else:\n map_orgs = page.object_list\n map_orgs = map_orgs\n\n # Get related objects of page 
at once\n page.object_list = page.object_list.select_related(\n 'primary_location__country',\n ).annotate(\n num_employees=Count('employees', distinct=True),\n num_projects=Count('projects', distinct=True),\n num_updates=Count('projects__project_updates', distinct=True),\n )\n\n return render(request, 'organisation_directory.html', {\n 'orgs_count': f.qs.distinct().count(),\n 'filter': f,\n 'page': page,\n 'paginator': paginator,\n 'page_range': page_range,\n 'show_filters': filter_class,\n 'q': filter_query_string(qs),\n 'map_organisations': map_orgs,\n })\n\n\n###############################################################################\n# Organisation main\n###############################################################################\n\n\ndef main(request, organisation_id):\n \"\"\"The organisation main view.\"\"\"\n return render(request, 'organisation_main.html', {\n 'organisation': get_object_or_404(Organisation, pk=organisation_id)})\n", "path": "akvo/rsr/views/organisation.py"}]} | 1,599 | 149 |
gh_patches_debug_18476 | rasdani/github-patches | git_diff | learningequality__kolibri-11433 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow site title to be customised
## Overview
Allow the site title to be customised; it’s currently hardcoded as ‘Kolibri’.
#### Description and outcomes
The site title is used in only a few places: the `<title>` of the base page and the ‘unsupported browser’ page, and the name in the PWA manifest.
Almost all of the time, the title is overridden by the plugin being used, via vuejs, so users will typically see something like ‘Explore’ or ‘Library’ instead of ‘Kolibri’.
The place where the default ‘Kolibri’ title is slightly problematic at the moment is in the PWA plugin: the name of the PWA is set to ‘Kolibri’, and that’s shown much more prominently in the browser’s list of PWA apps, or on the desktop app chooser when trying to run it.
For Endless Key in particular, that’s a bit problematic because users will likely try to find the PWA from their desktop by searching for ‘Endless Key’ rather than ‘Kolibri’.
So it would be good to either be able to:
- Separate the site title from the name of the platform (which will always be Kolibri), and allow the site title to be customised.
- Or, specifically set the site title in the configuration for the PWA plugin.
The second option is much more self-contained, but doesn’t seem semantically correct to me. The PWA manifest should be reflecting the main site’s configuration.
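A minimal sketch of the first option, reusing the theme machinery that already serves the favicon: a theme could supply the title under a `siteTitle` key (the key name is an assumption here), and a template tag alongside `theme_favicon` would fall back to ‘Kolibri’ when the key is absent, so existing installs keep their behaviour:

```
from django import template

from kolibri.core.theme_hook import ThemeHook

register = template.Library()


@register.simple_tag()
def site_title():
    # "siteTitle" is an assumed theme key; themes that do not set it keep
    # the current default title.
    return ThemeHook.get_theme().get("siteTitle", "Kolibri")
```

The PWA manifest’s `name` and `short_name` could then be derived from the same value, which would make the installed app show up as ‘Endless Key’ for that deployment without any plugin-specific configuration.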
#### Resources
- https://developer.mozilla.org/en-US/docs/Web/Manifest/name
- https://developer.mozilla.org/en-US/docs/Web/Manifest/short_name
#### Accessibility Requirements
Having an installed PWA use the name users are most familiar with seems like an accessibility issue, although I have not been approaching it from that angle and don’t know which specific accessibility spec applies here.
</issue>
<code>
[start of kolibri/core/templatetags/core_tags.py]
1 """
2 Kolibri template tags
3 =====================
4 """
5 from __future__ import absolute_import
6 from __future__ import print_function
7 from __future__ import unicode_literals
8
9 from django import template
10 from django.templatetags.static import static
11 from django.utils.html import format_html
12
13 from kolibri.core.hooks import FrontEndBaseASyncHook
14 from kolibri.core.hooks import FrontEndBaseHeadHook
15 from kolibri.core.hooks import FrontEndBaseSyncHook
16 from kolibri.core.theme_hook import ThemeHook
17
18 register = template.Library()
19
20
21 @register.simple_tag()
22 def frontend_base_assets():
23 """
24 This is a script tag for all ``FrontEndAssetHook`` hooks that implement a
25 render_to_html() method - this is used in ``/base.html`` template to
26 populate any Javascript and CSS that should be loaded at page load.
27
28 :return: HTML of script tags to insert into base.html
29 """
30 return FrontEndBaseSyncHook.html()
31
32
33 @register.simple_tag()
34 def frontend_base_async_assets():
35 """
36 This is a script tag for all ``FrontEndAssetHook`` hooks that implement a
37 render_to_html() method - this is used in ``/base.html`` template to
38 populate any Javascript and CSS that should be loaded at page load.
39
40 :return: HTML of script tags to insert into base.html
41 """
42 return FrontEndBaseASyncHook.html()
43
44
45 @register.simple_tag()
46 def frontend_base_head_markup():
47 """
48 This is a script tag for all ``FrontEndBaseHeadHook`` hooks that implement
49 a render_to_html() method - this is used in the ``/base.html`` template to
50 inject arbitrary markup into the ``<head>`` element.
51
52 :return: HTML to insert into head of base.html
53 """
54 return FrontEndBaseHeadHook.html()
55
56
57 @register.simple_tag()
58 def theme_favicon():
59 """
60 Render a favicon link to put in the <head> tag of base.html, if a favicon is
61 provided by the theme. If not, a default will be returned.
62 """
63 favicon_urls = [
64 logo["src"]
65 for logo in ThemeHook.get_theme().get("logos", [])
66 if logo.get("content_type", "") == "image/vnd.microsoft.icon"
67 ]
68
69 # Choose the first available .ico file. It's unlikely there's more than
70 # one specified in the theme.
71 favicon_url = favicon_urls[0] if favicon_urls else static("assets/logo.ico")
72
73 return format_html('<link rel="shortcut icon" href="{}">', favicon_url)
74
[end of kolibri/core/templatetags/core_tags.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/core/templatetags/core_tags.py b/kolibri/core/templatetags/core_tags.py
--- a/kolibri/core/templatetags/core_tags.py
+++ b/kolibri/core/templatetags/core_tags.py
@@ -14,6 +14,7 @@
from kolibri.core.hooks import FrontEndBaseHeadHook
from kolibri.core.hooks import FrontEndBaseSyncHook
from kolibri.core.theme_hook import ThemeHook
+from kolibri.utils.translation import ugettext as _
register = template.Library()
@@ -71,3 +72,13 @@
favicon_url = favicon_urls[0] if favicon_urls else static("assets/logo.ico")
return format_html('<link rel="shortcut icon" href="{}">', favicon_url)
+
+
[email protected]_tag()
+def site_title():
+ """
+ Return the text of the site title, if provided by the theme. If not, the
+ default will be returned. The site title may be translated, to allow for
+ transliteration into other alphabets where needed.
+ """
+ return ThemeHook.get_theme().get("siteTitle", _("Kolibri"))
| {"golden_diff": "diff --git a/kolibri/core/templatetags/core_tags.py b/kolibri/core/templatetags/core_tags.py\n--- a/kolibri/core/templatetags/core_tags.py\n+++ b/kolibri/core/templatetags/core_tags.py\n@@ -14,6 +14,7 @@\n from kolibri.core.hooks import FrontEndBaseHeadHook\n from kolibri.core.hooks import FrontEndBaseSyncHook\n from kolibri.core.theme_hook import ThemeHook\n+from kolibri.utils.translation import ugettext as _\n \n register = template.Library()\n \n@@ -71,3 +72,13 @@\n favicon_url = favicon_urls[0] if favicon_urls else static(\"assets/logo.ico\")\n \n return format_html('<link rel=\"shortcut icon\" href=\"{}\">', favicon_url)\n+\n+\[email protected]_tag()\n+def site_title():\n+ \"\"\"\n+ Return the text of the site title, if provided by the theme. If not, the\n+ default will be returned. The site title may be translated, to allow for\n+ transliteration into other alphabets where needed.\n+ \"\"\"\n+ return ThemeHook.get_theme().get(\"siteTitle\", _(\"Kolibri\"))\n", "issue": "Allow site title to be customised\n## Overview\r\n\r\nAllow the site title to be customised; it\u2019s currently hardcoded as \u2018Kolibri\u2019.\r\n\r\n#### Description and outcomes\r\n\r\nThe site title is used in only a few places: the `<title>` of the base page and the \u2018unsupported browser\u2019 page, and the name in the PWA manifest.\r\n\r\nAlmost all of the time, the title is overridden by the plugin being used, via vuejs, so users will typically see something like \u2018Explore\u2019 or \u2018Library\u2019 instead of \u2018Kolibri\u2019.\r\n\r\nThe place where the default \u2018Kolibri\u2019 title is slightly problematic at the moment is in the PWA plugin: the name of the PWA is set to \u2018Kolibri\u2019, and that\u2019s shown much more prominently in the browser\u2019s list of PWA apps, or on the desktop app chooser when trying to run it.\r\n\r\nFor Endless Key in particular, that\u2019s a bit problematic because users will likely try to find the PWA from their desktop by searching for \u2018Endless Key\u2019 rather than \u2018Kolibri\u2019.\r\n\r\nSo it would be good to either be able to:\r\n - Separate the site title from the name of the platform (which will always be Kolibri), and allow the site title to be customised.\r\n - Or, specifically set the site title in the configuration for the PWA plugin.\r\n\r\nThe second option is much more self-contained, but doesn\u2019t seem semantically correct to me. 
The PWA manifest should be reflecting the main site\u2019s configuration.\r\n\r\n#### Resources\r\n\r\n - https://developer.mozilla.org/en-US/docs/Web/Manifest/name\r\n - https://developer.mozilla.org/en-US/docs/Web/Manifest/short_name\r\n\r\n#### Accessibility Requirements\r\n\r\nHaving an installed PWA use the name the users will be most familiar with it seems like an accessibility issue, although I have not been approaching it from that angle and don\u2019t know which specific accessibility spec applies here.\n", "before_files": [{"content": "\"\"\"\nKolibri template tags\n=====================\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom django import template\nfrom django.templatetags.static import static\nfrom django.utils.html import format_html\n\nfrom kolibri.core.hooks import FrontEndBaseASyncHook\nfrom kolibri.core.hooks import FrontEndBaseHeadHook\nfrom kolibri.core.hooks import FrontEndBaseSyncHook\nfrom kolibri.core.theme_hook import ThemeHook\n\nregister = template.Library()\n\n\[email protected]_tag()\ndef frontend_base_assets():\n \"\"\"\n This is a script tag for all ``FrontEndAssetHook`` hooks that implement a\n render_to_html() method - this is used in ``/base.html`` template to\n populate any Javascript and CSS that should be loaded at page load.\n\n :return: HTML of script tags to insert into base.html\n \"\"\"\n return FrontEndBaseSyncHook.html()\n\n\[email protected]_tag()\ndef frontend_base_async_assets():\n \"\"\"\n This is a script tag for all ``FrontEndAssetHook`` hooks that implement a\n render_to_html() method - this is used in ``/base.html`` template to\n populate any Javascript and CSS that should be loaded at page load.\n\n :return: HTML of script tags to insert into base.html\n \"\"\"\n return FrontEndBaseASyncHook.html()\n\n\[email protected]_tag()\ndef frontend_base_head_markup():\n \"\"\"\n This is a script tag for all ``FrontEndBaseHeadHook`` hooks that implement\n a render_to_html() method - this is used in the ``/base.html`` template to\n inject arbitrary markup into the ``<head>`` element.\n\n :return: HTML to insert into head of base.html\n \"\"\"\n return FrontEndBaseHeadHook.html()\n\n\[email protected]_tag()\ndef theme_favicon():\n \"\"\"\n Render a favicon link to put in the <head> tag of base.html, if a favicon is\n provided by the theme. If not, a default will be returned.\n \"\"\"\n favicon_urls = [\n logo[\"src\"]\n for logo in ThemeHook.get_theme().get(\"logos\", [])\n if logo.get(\"content_type\", \"\") == \"image/vnd.microsoft.icon\"\n ]\n\n # Choose the first available .ico file. It's unlikely there's more than\n # one specified in the theme.\n favicon_url = favicon_urls[0] if favicon_urls else static(\"assets/logo.ico\")\n\n return format_html('<link rel=\"shortcut icon\" href=\"{}\">', favicon_url)\n", "path": "kolibri/core/templatetags/core_tags.py"}]} | 1,633 | 266 |
gh_patches_debug_756 | rasdani/github-patches | git_diff | vllm-project__vllm-1212 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[v0.2.0] Release Tracker
## Major changes
* Up to 60% performance improvement by optimizing de-tokenization and sampler
* Initial support for AWQ (performance not optimized)
* Support for RoPE scaling and LongChat
* Support for Mistral-7B
## PRs to be merged before the release
- [x] Vectorized sampler: #1048, #820
- [x] LongChat: #555
- [x] `TORCH_CUDA_ARCH_LIST` build option: #1074
- [x] Support for Mistral-7B: #1196
- [x] #1198
- ~~[ ] FP32 RoPE kernel: #1061~~ (deferred to the next PR)
</issue>
<code>
[start of vllm/__init__.py]
1 """vLLM: a high-throughput and memory-efficient inference engine for LLMs"""
2
3 from vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs
4 from vllm.engine.async_llm_engine import AsyncLLMEngine
5 from vllm.engine.llm_engine import LLMEngine
6 from vllm.engine.ray_utils import initialize_cluster
7 from vllm.entrypoints.llm import LLM
8 from vllm.outputs import CompletionOutput, RequestOutput
9 from vllm.sampling_params import SamplingParams
10
11 __version__ = "0.1.7"
12
13 __all__ = [
14 "LLM",
15 "SamplingParams",
16 "RequestOutput",
17 "CompletionOutput",
18 "LLMEngine",
19 "EngineArgs",
20 "AsyncLLMEngine",
21 "AsyncEngineArgs",
22 "initialize_cluster",
23 ]
24
[end of vllm/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vllm/__init__.py b/vllm/__init__.py
--- a/vllm/__init__.py
+++ b/vllm/__init__.py
@@ -8,7 +8,7 @@
from vllm.outputs import CompletionOutput, RequestOutput
from vllm.sampling_params import SamplingParams
-__version__ = "0.1.7"
+__version__ = "0.2.0"
__all__ = [
"LLM",
| {"golden_diff": "diff --git a/vllm/__init__.py b/vllm/__init__.py\n--- a/vllm/__init__.py\n+++ b/vllm/__init__.py\n@@ -8,7 +8,7 @@\n from vllm.outputs import CompletionOutput, RequestOutput\n from vllm.sampling_params import SamplingParams\n \n-__version__ = \"0.1.7\"\n+__version__ = \"0.2.0\"\n \n __all__ = [\n \"LLM\",\n", "issue": "[v0.2.0] Release Tracker\n## Major changes\r\n\r\n* Up to 60% performance improvement by optimizing de-tokenization and sampler\r\n* Initial support for AWQ (performance not optimized)\r\n* Support for RoPE scaling and LongChat\r\n* Support for Mistral-7B\r\n\r\n## PRs to be merged before the release\r\n\r\n- [x] Vectorized sampler: #1048, #820 \r\n- [x] LongChat: #555 \r\n- [x] `TORCH_CUDA_ARCH_LIST` build option: #1074 \r\n- [x] Support for Mistral-7B: #1196 \r\n- [x] #1198 \r\n- ~~[ ] FP32 RoPE kernel: #1061~~ (deferred to the next PR)\n", "before_files": [{"content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_cluster\nfrom vllm.entrypoints.llm import LLM\nfrom vllm.outputs import CompletionOutput, RequestOutput\nfrom vllm.sampling_params import SamplingParams\n\n__version__ = \"0.1.7\"\n\n__all__ = [\n \"LLM\",\n \"SamplingParams\",\n \"RequestOutput\",\n \"CompletionOutput\",\n \"LLMEngine\",\n \"EngineArgs\",\n \"AsyncLLMEngine\",\n \"AsyncEngineArgs\",\n \"initialize_cluster\",\n]\n", "path": "vllm/__init__.py"}]} | 932 | 109 |
gh_patches_debug_4574 | rasdani/github-patches | git_diff | qtile__qtile-2716 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
stack trace from Clipboard widget
```
2021-08-13 06:48:23,421 ERROR libqtile hook.py:fire():L381 Error in hook selection_change
Traceback (most recent call last):
File "/home/tycho/.local/lib/python3.9/site-packages/libqtile/hook.py", line 379, in fire
i(*args, **kwargs)
File "/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/clipboard.py", line 82, in hook_change
if self.is_blacklisted(selection["owner"]):
File "/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/clipboard.py", line 69, in is_blacklisted
owner = xcbq.Window(self.qtile.core.conn, owner_id)
AttributeError: module 'libqtile.backend.x11.xcbq' has no attribute 'Window'
```
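The fallback branch of `is_blacklisted` constructs `xcbq.Window`, but the module no longer exports a `Window` class at that level; in current qtile the X11 wrapper is the `XWindow` class in the `window` submodule. A hedged compatibility sketch (the attribute probing is an assumption for covering both layouts, not upstream code):

```
from libqtile.backend.x11 import xcbq

# Hedged sketch: pick whichever X11 window wrapper this qtile version exposes.
try:
    XWindow = xcbq.window.XWindow  # current module layout
except AttributeError:
    XWindow = xcbq.Window  # assumed older layout that exported Window directly


def wrap_owner(qtile, owner_id):
    # Illustrative helper mirroring is_blacklisted's lookup.
    if owner_id in qtile.windows_map:
        return qtile.windows_map[owner_id].window
    return XWindow(qtile.core.conn, owner_id)
```

Either way the lookup only runs for selection owners that are not already tracked in `windows_map`, so the common path is unchanged.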
</issue>
<code>
[start of libqtile/widget/clipboard.py]
1 # Copyright (c) 2014 Sean Vig
2 # Copyright (c) 2014 roger
3 # Copyright (c) 2014 Adi Sieker
4 # Copyright (c) 2014 Tycho Andersen
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 # SOFTWARE.
23
24 from libqtile import bar, hook
25 from libqtile.backend.x11 import xcbq
26 from libqtile.widget import base
27
28
29 class Clipboard(base._TextBox):
30 """Display current clipboard contents"""
31 orientations = base.ORIENTATION_HORIZONTAL
32 defaults = [
33 ("selection", "CLIPBOARD",
34 "the selection to display(CLIPBOARD or PRIMARY)"),
35 ("max_width", 10, "maximum number of characters to display "
36 "(None for all, useful when width is bar.STRETCH)"),
37 ("timeout", 10,
38 "Default timeout (seconds) for display text, None to keep forever"),
39 ("blacklist", ["keepassx"],
40 "list with blacklisted wm_class, sadly not every "
41 "clipboard window sets them, keepassx does."
42 "Clipboard contents from blacklisted wm_classes "
43 "will be replaced by the value of ``blacklist_text``."),
44 ("blacklist_text", "***********",
45 "text to display when the wm_class is blacklisted")
46 ]
47
48 def __init__(self, width=bar.CALCULATED, **config):
49 base._TextBox.__init__(self, "", width, **config)
50 self.add_defaults(Clipboard.defaults)
51 self.timeout_id = None
52
53 def _configure(self, qtile, bar):
54 base._TextBox._configure(self, qtile, bar)
55 self.text = ""
56 self.setup_hooks()
57
58 def clear(self, *args):
59 self.text = ""
60 self.bar.draw()
61
62 def is_blacklisted(self, owner_id):
63 if not self.blacklist:
64 return False
65
66 if owner_id in self.qtile.windows_map:
67 owner = self.qtile.windows_map[owner_id].window
68 else:
69 owner = xcbq.Window(self.qtile.core.conn, owner_id)
70
71 owner_class = owner.get_wm_class()
72 if owner_class:
73 for wm_class in self.blacklist:
74 if wm_class in owner_class:
75 return True
76
77 def setup_hooks(self):
78 def hook_change(name, selection):
79 if name != self.selection:
80 return
81
82 if self.is_blacklisted(selection["owner"]):
83 text = self.blacklist_text
84 else:
85 text = selection["selection"].replace("\n", " ")
86
87 text = text.strip()
88 if self.max_width is not None and len(text) > self.max_width:
89 text = text[:self.max_width] + "..."
90
91 self.text = text
92
93 if self.timeout_id:
94 self.timeout_id.cancel()
95 self.timeout_id = None
96
97 if self.timeout:
98 self.timeout_id = self.timeout_add(self.timeout, self.clear)
99 self.bar.draw()
100
101 def hook_notify(name, selection):
102 if name != self.selection:
103 return
104
105 if self.timeout_id:
106 self.timeout_id.cancel()
107 self.timeout_id = None
108
109             # only clear if the selection doesn't change again within the timeout
110 if self.timeout:
111 self.timeout_id = self.timeout_add(self.timeout, self.clear)
112 self.bar.draw()
113
114 hook.subscribe.selection_notify(hook_notify)
115 hook.subscribe.selection_change(hook_change)
116
[end of libqtile/widget/clipboard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/widget/clipboard.py b/libqtile/widget/clipboard.py
--- a/libqtile/widget/clipboard.py
+++ b/libqtile/widget/clipboard.py
@@ -66,7 +66,7 @@
if owner_id in self.qtile.windows_map:
owner = self.qtile.windows_map[owner_id].window
else:
- owner = xcbq.Window(self.qtile.core.conn, owner_id)
+ owner = xcbq.window.XWindow(self.qtile.core.conn, owner_id)
owner_class = owner.get_wm_class()
if owner_class:
| {"golden_diff": "diff --git a/libqtile/widget/clipboard.py b/libqtile/widget/clipboard.py\n--- a/libqtile/widget/clipboard.py\n+++ b/libqtile/widget/clipboard.py\n@@ -66,7 +66,7 @@\n if owner_id in self.qtile.windows_map:\n owner = self.qtile.windows_map[owner_id].window\n else:\n- owner = xcbq.Window(self.qtile.core.conn, owner_id)\n+ owner = xcbq.window.XWindow(self.qtile.core.conn, owner_id)\n \n owner_class = owner.get_wm_class()\n if owner_class:\n", "issue": "stack trace from Clipboard widget\n```\r\n2021-08-13 06:48:23,421 ERROR libqtile hook.py:fire():L381 Error in hook selection_change\r\nTraceback (most recent call last):\r\n File \"/home/tycho/.local/lib/python3.9/site-packages/libqtile/hook.py\", line 379, in fire\r\n i(*args, **kwargs)\r\n File \"/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/clipboard.py\", line 82, in hook_change\r\n if self.is_blacklisted(selection[\"owner\"]):\r\n File \"/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/clipboard.py\", line 69, in is_blacklisted\r\n owner = xcbq.Window(self.qtile.core.conn, owner_id)\r\nAttributeError: module 'libqtile.backend.x11.xcbq' has no attribute 'Window'\r\n```\n", "before_files": [{"content": "# Copyright (c) 2014 Sean Vig\n# Copyright (c) 2014 roger\n# Copyright (c) 2014 Adi Sieker\n# Copyright (c) 2014 Tycho Andersen\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nfrom libqtile import bar, hook\nfrom libqtile.backend.x11 import xcbq\nfrom libqtile.widget import base\n\n\nclass Clipboard(base._TextBox):\n \"\"\"Display current clipboard contents\"\"\"\n orientations = base.ORIENTATION_HORIZONTAL\n defaults = [\n (\"selection\", \"CLIPBOARD\",\n \"the selection to display(CLIPBOARD or PRIMARY)\"),\n (\"max_width\", 10, \"maximum number of characters to display \"\n \"(None for all, useful when width is bar.STRETCH)\"),\n (\"timeout\", 10,\n \"Default timeout (seconds) for display text, None to keep forever\"),\n (\"blacklist\", [\"keepassx\"],\n \"list with blacklisted wm_class, sadly not every \"\n \"clipboard window sets them, keepassx does.\"\n \"Clipboard contents from blacklisted wm_classes \"\n \"will be replaced by the value of ``blacklist_text``.\"),\n (\"blacklist_text\", \"***********\",\n \"text to display when the wm_class is blacklisted\")\n ]\n\n def __init__(self, width=bar.CALCULATED, **config):\n base._TextBox.__init__(self, \"\", width, **config)\n self.add_defaults(Clipboard.defaults)\n self.timeout_id = None\n\n def _configure(self, qtile, bar):\n base._TextBox._configure(self, qtile, bar)\n self.text = \"\"\n self.setup_hooks()\n\n def clear(self, *args):\n self.text = \"\"\n self.bar.draw()\n\n def is_blacklisted(self, owner_id):\n if not self.blacklist:\n return False\n\n if owner_id in self.qtile.windows_map:\n owner = self.qtile.windows_map[owner_id].window\n else:\n owner = xcbq.Window(self.qtile.core.conn, owner_id)\n\n owner_class = owner.get_wm_class()\n if owner_class:\n for wm_class in self.blacklist:\n if wm_class in owner_class:\n return True\n\n def setup_hooks(self):\n def hook_change(name, selection):\n if name != self.selection:\n return\n\n if self.is_blacklisted(selection[\"owner\"]):\n text = self.blacklist_text\n else:\n text = selection[\"selection\"].replace(\"\\n\", \" \")\n\n text = text.strip()\n if self.max_width is not None and len(text) > self.max_width:\n text = text[:self.max_width] + \"...\"\n\n self.text = text\n\n if self.timeout_id:\n self.timeout_id.cancel()\n self.timeout_id = None\n\n if self.timeout:\n self.timeout_id = self.timeout_add(self.timeout, self.clear)\n self.bar.draw()\n\n def hook_notify(name, selection):\n if name != self.selection:\n return\n\n if self.timeout_id:\n self.timeout_id.cancel()\n self.timeout_id = None\n\n # only clear if don't change don't apply in .5 seconds\n if self.timeout:\n self.timeout_id = self.timeout_add(self.timeout, self.clear)\n self.bar.draw()\n\n hook.subscribe.selection_notify(hook_notify)\n hook.subscribe.selection_change(hook_change)\n", "path": "libqtile/widget/clipboard.py"}]} | 1,947 | 131 |
gh_patches_debug_586 | rasdani/github-patches | git_diff | pex-tool__pex-1275 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.34
On the docket:
+ [x] Allow command-line arguments to be read from a file #1271
+ [x] Issue when running a module inside pex file #1018
+ [x] Guard against concurrent re-imports. #1270
+ [x] Ensure Pip logs to stderr. #1268
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.33"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.33"
+__version__ = "2.1.34"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.33\"\n+__version__ = \"2.1.34\"\n", "issue": "Release 2.1.34\nOn the docket:\r\n+ [x] Allow command-line arguments to be read from a file #1271\r\n+ [x] Issue when running a module inside pex file #1018\r\n+ [x] Guard against concurrent re-imports. #1270\r\n+ [x] Ensure Pip logs to stderr. #1268\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.33\"\n", "path": "pex/version.py"}]} | 670 | 97 |
gh_patches_debug_6675 | rasdani/github-patches | git_diff | fal-ai__dbt-fal-197 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] Too many messages received before initialization
> mmeasic: Hey, I get this log message on dbt version 0.21.0:
```
Logged from file /Users/mmeasic/.virtualenvs/bi-etl-dbt/lib/python3.8/site-packages/dbt/parser/manifest.py, line 792
Traceback (most recent call last):
File "/Users/mmeasic/.virtualenvs/bi-etl-dbt/lib/python3.8/site-packages/logbook/handlers.py", line 216, in handle
self.emit(record)
File "/Users/mmeasic/.virtualenvs/bi-etl-dbt/lib/python3.8/site-packages/dbt/logger.py", line 478, in emit
assert len(self._msg_buffer) < self._bufmax, \
AssertionError: too many messages received before initilization!
```
*****
> jstrom40: did your job run after it gave you this error message? i have had this problem when i have had too many threads set up in dbt. i also had it when i tried to run the fal tool but my actual job still ran after it popped out this message
*****
> mmeasic: It did run.
> I actually have 4 threads set for the target
[Thread link](https://discord.com/channels/908693336280432750/908693336280432755/930791100803850283)
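
For illustration, a minimal sketch of the buffering behaviour behind the assertion, assuming dbt's delayed log handler works the way the traceback suggests (the class name and `bufmax` value here are illustrative, not dbt's actual implementation):

```python
# Illustrative stand-in for dbt's delayed log handler: records are buffered
# until a log path is set, and the buffer has a hard cap.
class DelayedBufferHandler:
    def __init__(self, bufmax=5000):
        self._msg_buffer = []
        self._bufmax = bufmax
        self._initialized = False

    def emit(self, record):
        if self._initialized:
            return  # would write to the real log file here
        # Mirrors the assertion in dbt/logger.py seen in the traceback.
        assert len(self._msg_buffer) < self._bufmax, \
            'too many messages received before initilization!'
        self._msg_buffer.append(record)

    def set_path(self, path):
        # Initializing the handler (even with path=None) stops the buffering,
        # which is why disabling it early avoids the assertion.
        self._initialized = True
        self._msg_buffer = []
```

Under this reading, heavy parsing before log initialization (more threads, bigger projects) fills the buffer faster, which matches the reports above.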
</issue>
<code>
[start of src/fal/cli/cli.py]
1 from typing import List
2 import sys
3 from dbt.logger import log_manager, GLOBAL_LOGGER as logger
4 from fal.cli.flow_runner import fal_flow_run
5 from faldbt.lib import DBT_VCURRENT, DBT_V1
6 from .args import parse_args
7 from .fal_runner import fal_run
8 from fal.telemetry import telemetry
9
10
11 @telemetry.log_call("cli")
12 def cli(argv: List[str] = sys.argv):
13 parsed = parse_args(argv[1:])
14
15 # TODO: remove `action="extend"` to match exactly what dbt does
16 selects_count = (
17 argv.count("-s")
18 + argv.count("--select")
19 + argv.count("-m")
20 + argv.count("--model")
21 )
22 exclude_count = argv.count("--exclude")
23 script_count = argv.count("--script")
24
25 if parsed.disable_logging:
26 logger.disable()
27 # Re-enable logging for 1.0.0 through old API of logger
28 elif DBT_VCURRENT.compare(DBT_V1) >= 0:
29 if logger.disabled:
30 logger.enable()
31
32 with log_manager.applicationbound():
33 if parsed.debug:
34 log_manager.set_debug()
35
36 if parsed.command == "flow":
37 if parsed.flow_command == "run":
38 fal_flow_run(parsed)
39
40 elif parsed.command == "run":
41 fal_run(
42 parsed,
43 selects_count=selects_count,
44 exclude_count=exclude_count,
45 script_count=script_count,
46 )
47
[end of src/fal/cli/cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/fal/cli/cli.py b/src/fal/cli/cli.py
--- a/src/fal/cli/cli.py
+++ b/src/fal/cli/cli.py
@@ -20,6 +20,10 @@
exclude_count = argv.count("--exclude")
script_count = argv.count("--script")
+ # Disabling the dbt.logger.DelayedFileHandler manually
+ # since we do not use the new dbt logging system
+ # This fixes issue https://github.com/fal-ai/fal/issues/97
+ log_manager.set_path(None)
if parsed.disable_logging:
logger.disable()
# Re-enable logging for 1.0.0 through old API of logger
| {"golden_diff": "diff --git a/src/fal/cli/cli.py b/src/fal/cli/cli.py\n--- a/src/fal/cli/cli.py\n+++ b/src/fal/cli/cli.py\n@@ -20,6 +20,10 @@\n exclude_count = argv.count(\"--exclude\")\n script_count = argv.count(\"--script\")\n \n+ # Disabling the dbt.logger.DelayedFileHandler manually\n+ # since we do not use the new dbt logging system\n+ # This fixes issue https://github.com/fal-ai/fal/issues/97\n+ log_manager.set_path(None)\n if parsed.disable_logging:\n logger.disable()\n # Re-enable logging for 1.0.0 through old API of logger\n", "issue": "[Bug] Too many messages received before initialization\n> mmeasic: Hey, I get this log message on dbt version 0.21.0:\r\n\r\n```Logged from file /Users/mmeasic/.virtualenvs/bi-etl-dbt/lib/python3.8/site-packages/dbt/parser/manifest.py, line 792\r\nTraceback (most recent call last):\r\n File \"/Users/mmeasic/.virtualenvs/bi-etl-dbt/lib/python3.8/site-packages/logbook/handlers.py\", line 216, in handle\r\n self.emit(record)\r\n File \"/Users/mmeasic/.virtualenvs/bi-etl-dbt/lib/python3.8/site-packages/dbt/logger.py\", line 478, in emit\r\n assert len(self._msg_buffer) < self._bufmax, \\\r\nAssertionError: too many messages received before initilization!\r\n```\r\n\r\n*****\r\n\r\n> jstrom40: did your job run after it gave you this error message? i have had this problem when i have had too many threads set up in dbt. i also had it when i tried to run the fal tool but my actual job still ran after it popped out this message\r\n\r\n*****\r\n\r\n> mmeasic: It did run.\r\n> I actually have 4 threads set for the target\r\n\r\n[Thread link](https://discord.com/channels/908693336280432750/908693336280432755/930791100803850283)\n", "before_files": [{"content": "from typing import List\nimport sys\nfrom dbt.logger import log_manager, GLOBAL_LOGGER as logger\nfrom fal.cli.flow_runner import fal_flow_run\nfrom faldbt.lib import DBT_VCURRENT, DBT_V1\nfrom .args import parse_args\nfrom .fal_runner import fal_run\nfrom fal.telemetry import telemetry\n\n\[email protected]_call(\"cli\")\ndef cli(argv: List[str] = sys.argv):\n parsed = parse_args(argv[1:])\n\n # TODO: remove `action=\"extend\"` to match exactly what dbt does\n selects_count = (\n argv.count(\"-s\")\n + argv.count(\"--select\")\n + argv.count(\"-m\")\n + argv.count(\"--model\")\n )\n exclude_count = argv.count(\"--exclude\")\n script_count = argv.count(\"--script\")\n\n if parsed.disable_logging:\n logger.disable()\n # Re-enable logging for 1.0.0 through old API of logger\n elif DBT_VCURRENT.compare(DBT_V1) >= 0:\n if logger.disabled:\n logger.enable()\n\n with log_manager.applicationbound():\n if parsed.debug:\n log_manager.set_debug()\n\n if parsed.command == \"flow\":\n if parsed.flow_command == \"run\":\n fal_flow_run(parsed)\n\n elif parsed.command == \"run\":\n fal_run(\n parsed,\n selects_count=selects_count,\n exclude_count=exclude_count,\n script_count=script_count,\n )\n", "path": "src/fal/cli/cli.py"}]} | 1,277 | 156 |
gh_patches_debug_23183 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-3067 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
'PathManagerBase' object has no attribute 'makedirs'
In attempting to create the tensorboard directory with PathManager, we're calling a nonexistent function.
To repro:
```bash
$ python -m parlai.scripts.train_model -t personachat -m transformer/ranker -mf /tmp/model_tr6 --n-layers 1 --embedding-size 300 --ffn-size 600 --n-heads 4 --num-epochs 2 -veps 0.25 -bs 64 -lr 0.001 --dropout 0.1 --embedding-type fasttext_cc --candidates batch --tensorboard-log true
```
Exception hit:
```
File "/Users/spoff/ParlAI/parlai/core/logs.py", line 56, in __init__
PathManager.makedirs(tbpath)
AttributeError: 'PathManagerBase' object has no attribute 'makedirs'
```
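
For illustration, a hedged sketch of a defensive directory-creation helper; `ensure_dir` is a hypothetical name, and the assumption is that the installed path manager exposes `mkdirs` (as iopath-style managers do) rather than `makedirs`:

```python
import os

def ensure_dir(path_manager, path):
    # Prefer the path manager's own directory creation when available;
    # the AttributeError above suggests the method on this PathManager
    # is named `mkdirs`, not `makedirs`.
    if hasattr(path_manager, 'mkdirs'):
        path_manager.mkdirs(path)
    else:
        os.makedirs(path, exist_ok=True)
```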
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7
8 import sys
9
10 from setuptools import setup, find_packages
11
12 VERSION = '0.9.1' # if you update, update parlai/__init__.py too!
13
14 if sys.version_info < (3, 6):
15 sys.exit('Sorry, Python >=3.6 is required for ParlAI.')
16
17 with open('README.md', encoding="utf8") as f:
18 # strip the header and badges etc
19 readme = f.read().split('--------------------')[-1]
20
21 with open('requirements.txt') as f:
22 reqs = []
23 for line in f:
24 line = line.strip()
25 reqs.append(line.split('==')[0])
26
27
28 if __name__ == '__main__':
29 setup(
30 name='parlai',
31 version=VERSION,
32 description='Unified platform for dialogue research.',
33 long_description=readme,
34 long_description_content_type='text/markdown',
35 url='http://parl.ai/',
36 python_requires='>=3.6',
37 packages=find_packages(
38 exclude=('data', 'docs', 'examples', 'tests', 'parlai_internal*')
39 ),
40 install_requires=reqs,
41 include_package_data=True,
42 package_data={'': ['*.txt', '*.md']},
43 entry_points={
44 "flake8.extension": ["PAI = parlai.utils.flake8:ParlAIChecker"],
45 "console_scripts": ["parlai=parlai.__main__:main"],
46 },
47 classifiers=[
48 "Programming Language :: Python :: 3",
49 "License :: OSI Approved :: MIT License",
50 "Topic :: Scientific/Engineering :: Artificial Intelligence",
51 "Natural Language :: English",
52 ],
53 )
54
[end of setup.py]
[start of parlai/core/logs.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6 """
7 Log metrics to tensorboard.
8
9 This file provides interface to log any metrics in tensorboard, could be
10 extended to any other tool like visdom.
11
12 .. code-block: none
13
14 tensorboard --logdir <PARLAI_DATA/tensorboard> --port 8888.
15 """
16
17 import json
18 import numbers
19 from parlai.core.opt import Opt
20 from parlai.core.metrics import Metric
21 from parlai.utils.io import PathManager
22 import parlai.utils.logging as logging
23
24
25 class TensorboardLogger(object):
26 """
27 Log objects to tensorboard.
28 """
29
30 @staticmethod
31 def add_cmdline_args(argparser):
32 """
33 Add tensorboard CLI args.
34 """
35 logger = argparser.add_argument_group('Tensorboard Arguments')
36 logger.add_argument(
37 '-tblog',
38 '--tensorboard-log',
39 type='bool',
40 default=False,
41 help="Tensorboard logging of metrics, default is %(default)s",
42 hidden=False,
43 )
44
45 def __init__(self, opt: Opt):
46 try:
47 # tensorboard is a very expensive thing to import. Wait until the
48 # last second to import it.
49 from tensorboardX import SummaryWriter
50 except ImportError:
51 raise ImportError('Please run `pip install tensorboard tensorboardX`.')
52
53 tbpath = opt['model_file'] + '.tensorboard'
54 logging.debug(f'Saving tensorboard logs to: {tbpath}')
55 if not PathManager.exists(tbpath):
56 PathManager.makedirs(tbpath)
57 self.writer = SummaryWriter(tbpath, comment=json.dumps(opt))
58
59 def log_metrics(self, setting, step, report):
60 """
61 Add all metrics from tensorboard_metrics opt key.
62
63 :param setting:
64 One of train/valid/test. Will be used as the title for the graph.
65 :param step:
66 Number of parleys
67 :param report:
68 The report to log
69 """
70 for k, v in report.items():
71 if isinstance(v, numbers.Number):
72 self.writer.add_scalar(f'{k}/{setting}', v, global_step=step)
73 elif isinstance(v, Metric):
74 self.writer.add_scalar(f'{k}/{setting}', v.value(), global_step=step)
75 else:
76 logging.error(f'k {k} v {v} is not a number')
77
78 def flush(self):
79 self.writer.flush()
80
[end of parlai/core/logs.py]
[start of parlai/__init__.py]
1 #!/usr/bin/env python3
2
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 __version__ = '0.9.1'
8
[end of parlai/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parlai/__init__.py b/parlai/__init__.py
--- a/parlai/__init__.py
+++ b/parlai/__init__.py
@@ -4,4 +4,4 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
-__version__ = '0.9.1'
+__version__ = '0.9.2'
diff --git a/parlai/core/logs.py b/parlai/core/logs.py
--- a/parlai/core/logs.py
+++ b/parlai/core/logs.py
@@ -53,7 +53,7 @@
tbpath = opt['model_file'] + '.tensorboard'
logging.debug(f'Saving tensorboard logs to: {tbpath}')
if not PathManager.exists(tbpath):
- PathManager.makedirs(tbpath)
+ PathManager.mkdirs(tbpath)
self.writer = SummaryWriter(tbpath, comment=json.dumps(opt))
def log_metrics(self, setting, step, report):
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,7 +9,7 @@
from setuptools import setup, find_packages
-VERSION = '0.9.1' # if you update, update parlai/__init__.py too!
+VERSION = '0.9.2' # if you update, update parlai/__init__.py too!
if sys.version_info < (3, 6):
sys.exit('Sorry, Python >=3.6 is required for ParlAI.')
| {"golden_diff": "diff --git a/parlai/__init__.py b/parlai/__init__.py\n--- a/parlai/__init__.py\n+++ b/parlai/__init__.py\n@@ -4,4 +4,4 @@\n # This source code is licensed under the MIT license found in the\n # LICENSE file in the root directory of this source tree.\n \n-__version__ = '0.9.1'\n+__version__ = '0.9.2'\ndiff --git a/parlai/core/logs.py b/parlai/core/logs.py\n--- a/parlai/core/logs.py\n+++ b/parlai/core/logs.py\n@@ -53,7 +53,7 @@\n tbpath = opt['model_file'] + '.tensorboard'\n logging.debug(f'Saving tensorboard logs to: {tbpath}')\n if not PathManager.exists(tbpath):\n- PathManager.makedirs(tbpath)\n+ PathManager.mkdirs(tbpath)\n self.writer = SummaryWriter(tbpath, comment=json.dumps(opt))\n \n def log_metrics(self, setting, step, report):\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n \n from setuptools import setup, find_packages\n \n-VERSION = '0.9.1' # if you update, update parlai/__init__.py too!\n+VERSION = '0.9.2' # if you update, update parlai/__init__.py too!\n \n if sys.version_info < (3, 6):\n sys.exit('Sorry, Python >=3.6 is required for ParlAI.')\n", "issue": "'PathManagerBase' object has no attribute 'makedirs'\nIn attempting to create the tensorboard directory with PathManager we're calling a nonexistent function.\r\n\r\nTo repro:\r\n```bash\r\n$ python -m parlai.scripts.train_model -t personachat -m transformer/ranker -mf /tmp/model_tr6 --n-layers 1 --embedding-size 300 --ffn-size 600 --n-heads 4 --num-epochs 2 -veps 0.25 -bs 64 -lr 0.001 --dropout 0.1 --embedding-type fasttext_cc --candidates batch --tensorboard-log true\r\n```\r\n\r\nException hit:\r\n```\r\nFile \"/Users/spoff/ParlAI/parlai/core/logs.py\", line 56, in __init__\r\n PathManager.makedirs(tbpath)\r\nAttributeError: 'PathManagerBase' object has no attribute 'makedirs'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport sys\n\nfrom setuptools import setup, find_packages\n\nVERSION = '0.9.1' # if you update, update parlai/__init__.py too!\n\nif sys.version_info < (3, 6):\n sys.exit('Sorry, Python >=3.6 is required for ParlAI.')\n\nwith open('README.md', encoding=\"utf8\") as f:\n # strip the header and badges etc\n readme = f.read().split('--------------------')[-1]\n\nwith open('requirements.txt') as f:\n reqs = []\n for line in f:\n line = line.strip()\n reqs.append(line.split('==')[0])\n\n\nif __name__ == '__main__':\n setup(\n name='parlai',\n version=VERSION,\n description='Unified platform for dialogue research.',\n long_description=readme,\n long_description_content_type='text/markdown',\n url='http://parl.ai/',\n python_requires='>=3.6',\n packages=find_packages(\n exclude=('data', 'docs', 'examples', 'tests', 'parlai_internal*')\n ),\n install_requires=reqs,\n include_package_data=True,\n package_data={'': ['*.txt', '*.md']},\n entry_points={\n \"flake8.extension\": [\"PAI = parlai.utils.flake8:ParlAIChecker\"],\n \"console_scripts\": [\"parlai=parlai.__main__:main\"],\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Natural Language :: English\",\n ],\n )\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nLog metrics to tensorboard.\n\nThis file provides interface to log any metrics in tensorboard, could be\nextended to any other tool like visdom.\n\n.. code-block: none\n\n tensorboard --logdir <PARLAI_DATA/tensorboard> --port 8888.\n\"\"\"\n\nimport json\nimport numbers\nfrom parlai.core.opt import Opt\nfrom parlai.core.metrics import Metric\nfrom parlai.utils.io import PathManager\nimport parlai.utils.logging as logging\n\n\nclass TensorboardLogger(object):\n \"\"\"\n Log objects to tensorboard.\n \"\"\"\n\n @staticmethod\n def add_cmdline_args(argparser):\n \"\"\"\n Add tensorboard CLI args.\n \"\"\"\n logger = argparser.add_argument_group('Tensorboard Arguments')\n logger.add_argument(\n '-tblog',\n '--tensorboard-log',\n type='bool',\n default=False,\n help=\"Tensorboard logging of metrics, default is %(default)s\",\n hidden=False,\n )\n\n def __init__(self, opt: Opt):\n try:\n # tensorboard is a very expensive thing to import. Wait until the\n # last second to import it.\n from tensorboardX import SummaryWriter\n except ImportError:\n raise ImportError('Please run `pip install tensorboard tensorboardX`.')\n\n tbpath = opt['model_file'] + '.tensorboard'\n logging.debug(f'Saving tensorboard logs to: {tbpath}')\n if not PathManager.exists(tbpath):\n PathManager.makedirs(tbpath)\n self.writer = SummaryWriter(tbpath, comment=json.dumps(opt))\n\n def log_metrics(self, setting, step, report):\n \"\"\"\n Add all metrics from tensorboard_metrics opt key.\n\n :param setting:\n One of train/valid/test. Will be used as the title for the graph.\n :param step:\n Number of parleys\n :param report:\n The report to log\n \"\"\"\n for k, v in report.items():\n if isinstance(v, numbers.Number):\n self.writer.add_scalar(f'{k}/{setting}', v, global_step=step)\n elif isinstance(v, Metric):\n self.writer.add_scalar(f'{k}/{setting}', v.value(), global_step=step)\n else:\n logging.error(f'k {k} v {v} is not a number')\n\n def flush(self):\n self.writer.flush()\n", "path": "parlai/core/logs.py"}, {"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n__version__ = '0.9.1'\n", "path": "parlai/__init__.py"}]} | 2,034 | 357 |
gh_patches_debug_17281 | rasdani/github-patches | git_diff | kivy__kivy-3303 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't create package for windows with kivy 1.9 portable
I'm looking to port an existing kivy 1.8 project to kivy 1.9. I've just downloaded the portable version and have the application working.
However when packaging the app using pyinstaller and the instructions on http://kivy.org/docs/guide/packaging-windows.html the app packages, but on execution immediately fails with error:
```
Traceback (most recent call last):
File "<string>", line 34, in <module>
ImportError: No module named pygame.pkgdata
```
I've tried using my old .spec file and generating a new one with exactly the same results.
I'm a bit mystified about where this is coming from, as pygame isn't imported anywhere in my application and I thought it had been replaced with sdl2 in kivy 1.9. I'm also confused that the application works when run directly.
Anyone come across this issue or can point me in the right direction?
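
For illustration, one hedged way to make the runtime hook tolerate a missing pygame is to guard the monkey-patch behind the import; this is a sketch whose names mirror the hook above, not necessarily the fix Kivy shipped:

```python
# Sketch: only monkey-patch pygame's resource lookup when pygame is present,
# so SDL2-only installs don't crash at startup.
def getResource(identifier, *args, **kwargs):
    if identifier == 'pygame_icon.tiff':
        raise IOError()
    return _original_getResource(identifier, *args, **kwargs)

try:
    import pygame.pkgdata
    _original_getResource = pygame.pkgdata.getResource
    pygame.pkgdata.getResource = getResource
except ImportError:
    # No pygame available (e.g. SDL2 builds of kivy 1.9): nothing to patch.
    pass
```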
</issue>
<code>
[start of kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py]
1 from os.path import join, dirname
2 from os import environ, chdir, putenv
3 import sys
4
5 root = 'kivy_install'
6 if hasattr(sys, '_MEIPASS'):
7 # PyInstaller >= 1.6
8 chdir(sys._MEIPASS)
9 root = join(sys._MEIPASS, root)
10 elif '_MEIPASS2' in environ:
11 # PyInstaller < 1.6 (tested on 1.5 only)
12 chdir(environ['_MEIPASS2'])
13 root = join(environ['_MEIPASS2'], root)
14 else:
15 chdir(dirname(sys.argv[0]))
16 root = join(dirname(sys.argv[0]), root)
17
18
19 sys.path += [join(root, '_libs')]
20
21 if sys.platform == 'darwin':
22 sitepackages = join(root, '..', 'sitepackages')
23 sys.path += [sitepackages, join(sitepackages, 'gst-0.10')]
24 putenv('GST_REGISTRY_FORK', 'no')
25
26 environ['GST_PLUGIN_PATH'] = join(root, '..', 'gst-plugins')
27 environ['KIVY_DATA_DIR'] = join(root, 'data')
28 environ['KIVY_EXTS_DIR'] = join(root, 'extensions')
29 environ['KIVY_MODULES_DIR'] = join(root, 'modules')
30 environ['KIVY_EMBED'] = '1'
31
32 # Monkey-patch pygame to get around an issue with Pygame window icon and
33 # PyInstaller 2.1. See kivy issue #1638
34 # Uncomment the following to package pygame
35 #import pygame.pkgdata
36 #_original_getResource = pygame.pkgdata.getResource
37 #
38 #
39 #def getResource(identifier, *args, **kwargs):
40 # if identifier == 'pygame_icon.tiff':
41 # raise IOError()
42 # return _original_getResource(identifier, *args, **kwargs)
43 #pygame.pkgdata.getResource = getResource
44
[end of kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py b/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py
--- a/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py
+++ b/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py
@@ -29,15 +29,17 @@
environ['KIVY_MODULES_DIR'] = join(root, 'modules')
environ['KIVY_EMBED'] = '1'
+
# Monkey-patch pygame to get around an issue with Pygame window icon and
# PyInstaller 2.1. See kivy issue #1638
-# Uncomment the following to package pygame
-#import pygame.pkgdata
-#_original_getResource = pygame.pkgdata.getResource
-#
-#
-#def getResource(identifier, *args, **kwargs):
-# if identifier == 'pygame_icon.tiff':
-# raise IOError()
-# return _original_getResource(identifier, *args, **kwargs)
-#pygame.pkgdata.getResource = getResource
+def getResource(identifier, *args, **kwargs):
+ if identifier == 'pygame_icon.tiff':
+ raise IOError()
+ return _original_getResource(identifier, *args, **kwargs)
+
+try:
+ import pygame.pkgdata
+ _original_getResource = pygame.pkgdata.getResource
+ pygame.pkgdata.getResource = getResource
+except ImportError:
+ pass
| {"golden_diff": "diff --git a/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py b/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py\n--- a/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py\n+++ b/kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py\n@@ -29,15 +29,17 @@\n environ['KIVY_MODULES_DIR'] = join(root, 'modules')\n environ['KIVY_EMBED'] = '1'\n \n+\n # Monkey-patch pygame to get around an issue with Pygame window icon and\n # PyInstaller 2.1. See kivy issue #1638\n-# Uncomment the following to package pygame\n-#import pygame.pkgdata\n-#_original_getResource = pygame.pkgdata.getResource\n-#\n-#\n-#def getResource(identifier, *args, **kwargs):\n-# if identifier == 'pygame_icon.tiff':\n-# raise IOError()\n-# return _original_getResource(identifier, *args, **kwargs)\n-#pygame.pkgdata.getResource = getResource\n+def getResource(identifier, *args, **kwargs):\n+ if identifier == 'pygame_icon.tiff':\n+ raise IOError()\n+ return _original_getResource(identifier, *args, **kwargs)\n+\n+try:\n+ import pygame.pkgdata\n+ _original_getResource = pygame.pkgdata.getResource\n+ pygame.pkgdata.getResource = getResource\n+except ImportError:\n+ pass\n", "issue": "Can't create package for windows with kivy 1.9 portable\nI'm looking to port an existing kivy 1.8 project to kivy 1.9. I've just downloaded the portable version and have the application working.\n\nHowever when packaging the app using pyinstaller and the instructions on http://kivy.org/docs/guide/packaging-windows.html the app packages, but on execution immediately fails with error:\n\n```\nTraceback (most recent call last):\n File \"<string>\", line 34, in <module>\nImportError: No module named pygame.pkgdata\n```\n\nI've tried using my old .spec file and generating a new one with exactly the same results.\n\nI'm a bit mystified where this is coming from as pygame isn't imported anywhere in my application and I thought it had been replaced with sdl2 in kivy 1.9. I'm also confused that the application works when run directly.\n\nAnyone come across this issue or can point me in the right direction?\n\n", "before_files": [{"content": "from os.path import join, dirname\nfrom os import environ, chdir, putenv\nimport sys\n\nroot = 'kivy_install'\nif hasattr(sys, '_MEIPASS'):\n # PyInstaller >= 1.6\n chdir(sys._MEIPASS)\n root = join(sys._MEIPASS, root)\nelif '_MEIPASS2' in environ:\n # PyInstaller < 1.6 (tested on 1.5 only)\n chdir(environ['_MEIPASS2'])\n root = join(environ['_MEIPASS2'], root)\nelse:\n chdir(dirname(sys.argv[0]))\n root = join(dirname(sys.argv[0]), root)\n\n\nsys.path += [join(root, '_libs')]\n\nif sys.platform == 'darwin':\n sitepackages = join(root, '..', 'sitepackages')\n sys.path += [sitepackages, join(sitepackages, 'gst-0.10')]\n putenv('GST_REGISTRY_FORK', 'no')\n\nenviron['GST_PLUGIN_PATH'] = join(root, '..', 'gst-plugins')\nenviron['KIVY_DATA_DIR'] = join(root, 'data')\nenviron['KIVY_EXTS_DIR'] = join(root, 'extensions')\nenviron['KIVY_MODULES_DIR'] = join(root, 'modules')\nenviron['KIVY_EMBED'] = '1'\n\n# Monkey-patch pygame to get around an issue with Pygame window icon and\n# PyInstaller 2.1. 
See kivy issue #1638\n# Uncomment the following to package pygame\n#import pygame.pkgdata\n#_original_getResource = pygame.pkgdata.getResource\n#\n#\n#def getResource(identifier, *args, **kwargs):\n# if identifier == 'pygame_icon.tiff':\n# raise IOError()\n# return _original_getResource(identifier, *args, **kwargs)\n#pygame.pkgdata.getResource = getResource\n", "path": "kivy/tools/packaging/pyinstaller_hooks/rt-hook-kivy.py"}]} | 1,239 | 320 |
gh_patches_debug_35684 | rasdani/github-patches | git_diff | ManageIQ__integration_tests-296 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Better YAML overriding
Now it does not take just the root element into account; it crawls through the dictionary and only updates the values that are present in the new dictionary. It converts all dicts to Configs; values other than those specified in the override dict are not touched.
It also improves the `__getattribute__` behaviour - now it propagates the interface to the child nodes by converting all `dict`s to `Config`s before returning the value, so the dot operator can be used everywhere.
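
For illustration, a minimal sketch of the recursive update described above; `recursive_update` is a hypothetical name, and the real implementation lives inside the `Config` machinery:

```python
def recursive_update(base, override):
    """Update `base` in place, descending into nested dicts so that only
    the keys present in `override` are replaced; everything else stays."""
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            recursive_update(base[key], value)
        else:
            base[key] = value

config = {'VM': {'a': 1, 'b': 2, 'name': 'qwer'}, 'c': 3}
recursive_update(config, {'VM': {'name': 'tzui'}})
assert config == {'VM': {'a': 1, 'b': 2, 'name': 'tzui'}, 'c': 3}
```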
</issue>
<code>
[start of utils/conf_loader.py]
1 import os
2 from collections import OrderedDict
3
4 import py.path
5 import yaml
6 from yaml.loader import Loader
7
8
9 class OrderedYamlLoader(Loader):
10 def construct_yaml_map(self, node):
11 data = OrderedDict()
12 yield data
13 value = self.construct_mapping(node)
14 data.update(value)
15
16
17 class ConfigNotFoundException(Exception):
18 pass
19
20
21 class Config(dict):
22 """A dict subclass with knowledge of conf yamls and how to load them
23
24 Also supports descriptor access, e.g. conf.configfile
25 (compared to the normal dict access, conf['configfile'])
26 """
27 # Stash the exception on the class for convenience, e.g.
28 # try:
29 # conf[does_not_exist]
30 # except conf.NotFoundException
31 # ...
32 NotFoundException = ConfigNotFoundException
33
34 # Support for descriptor access, e.g. instance.attrname
35 # Note that this is only on the get side, for support of nefarious things
36 # like setting and deleting, use the normal dict interface.
37 def __getattribute__(self, attr):
38 # Attempt normal object attr lookup; delegate to the dict interface if that fails
39 try:
40 return super(Config, self).__getattribute__(attr)
41 except AttributeError:
42 return self[attr]
43
44 def __getitem__(self, key):
45 # Attempt a normal dict lookup to pull a cached conf
46 try:
47 return super(Config, self).__getitem__(key)
48 except KeyError:
49 # Cache miss, load the requested yaml
50 yaml_dict = load_yaml(key)
51
52 # Graft in local yaml updates if they're available
53 try:
54 local_yaml = '%s.local' % key
55 local_yaml_dict = load_yaml(local_yaml)
56 yaml_dict.update(local_yaml_dict)
57 except ConfigNotFoundException:
58 pass
59
60 # Returning self[key] instead of yaml_dict as a small sanity check
61 self[key] = yaml_dict
62 return self[key]
63
64
65 def load_yaml(filename=None):
66 # Find the requested yaml in the config dir, relative to this file's location
67 # (aiming for cfme_tests/config)
68 this_file = os.path.abspath(__file__)
69 path = py.path.local(this_file).new(basename='../conf/%s.yaml' % filename)
70
71 if path.check():
72 with path.open() as config_fh:
73 return yaml.load(config_fh, Loader=OrderedYamlLoader)
74 else:
75 msg = 'Unable to load configuration file at %s' % path
76 raise ConfigNotFoundException(msg)
77
[end of utils/conf_loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/conf_loader.py b/utils/conf_loader.py
--- a/utils/conf_loader.py
+++ b/utils/conf_loader.py
@@ -1,17 +1,19 @@
import os
-from collections import OrderedDict
import py.path
import yaml
from yaml.loader import Loader
-class OrderedYamlLoader(Loader):
+class YamlConfigLoader(Loader):
+ # Override the root yaml node to be a RecursiveUpdateDict
def construct_yaml_map(self, node):
- data = OrderedDict()
+ data = RecursiveUpdateDict()
yield data
value = self.construct_mapping(node)
data.update(value)
+# Do the same for child nodes of the yaml mapping type
+YamlConfigLoader.add_constructor('tag:yaml.org,2002:map', YamlConfigLoader.construct_yaml_map)
class ConfigNotFoundException(Exception):
@@ -62,6 +64,43 @@
return self[key]
+class RecursiveUpdateDict(dict):
+ def update(self, new_data):
+ """ More intelligent dictionary update.
+
+ This method changes just data that have been changed. How does it work?
+ Imagine you want to change just VM name, other things should stay the same.
+
+ Original config:
+ something:
+ somewhere:
+ VM:
+ a: 1
+ b: 2
+ name: qwer
+ c: 3
+
+ Instead of copying the whole part from original to the override with just 'name' changed,
+ you will write this:
+
+ something:
+ somewhere:
+ VM:
+ name: tzui
+
+ This digging deeper affects only dictionary values. Lists are unaffected! And so do other
+ types.
+
+ Args:
+ new_data: Update data.
+ """
+ for key, value in new_data.iteritems():
+ if isinstance(value, type(self)) and key in self:
+ type(self).update(self[key], value)
+ else:
+ self[key] = new_data[key]
+
+
def load_yaml(filename=None):
# Find the requested yaml in the config dir, relative to this file's location
# (aiming for cfme_tests/config)
@@ -70,7 +109,7 @@
if path.check():
with path.open() as config_fh:
- return yaml.load(config_fh, Loader=OrderedYamlLoader)
+ return yaml.load(config_fh, Loader=YamlConfigLoader)
else:
msg = 'Unable to load configuration file at %s' % path
raise ConfigNotFoundException(msg)
| {"golden_diff": "diff --git a/utils/conf_loader.py b/utils/conf_loader.py\n--- a/utils/conf_loader.py\n+++ b/utils/conf_loader.py\n@@ -1,17 +1,19 @@\n import os\n-from collections import OrderedDict\n \n import py.path\n import yaml\n from yaml.loader import Loader\n \n \n-class OrderedYamlLoader(Loader):\n+class YamlConfigLoader(Loader):\n+ # Override the root yaml node to be a RecursiveUpdateDict\n def construct_yaml_map(self, node):\n- data = OrderedDict()\n+ data = RecursiveUpdateDict()\n yield data\n value = self.construct_mapping(node)\n data.update(value)\n+# Do the same for child nodes of the yaml mapping type\n+YamlConfigLoader.add_constructor('tag:yaml.org,2002:map', YamlConfigLoader.construct_yaml_map)\n \n \n class ConfigNotFoundException(Exception):\n@@ -62,6 +64,43 @@\n return self[key]\n \n \n+class RecursiveUpdateDict(dict):\n+ def update(self, new_data):\n+ \"\"\" More intelligent dictionary update.\n+\n+ This method changes just data that have been changed. How does it work?\n+ Imagine you want to change just VM name, other things should stay the same.\n+\n+ Original config:\n+ something:\n+ somewhere:\n+ VM:\n+ a: 1\n+ b: 2\n+ name: qwer\n+ c: 3\n+\n+ Instead of copying the whole part from original to the override with just 'name' changed,\n+ you will write this:\n+\n+ something:\n+ somewhere:\n+ VM:\n+ name: tzui\n+\n+ This digging deeper affects only dictionary values. Lists are unaffected! And so do other\n+ types.\n+\n+ Args:\n+ new_data: Update data.\n+ \"\"\"\n+ for key, value in new_data.iteritems():\n+ if isinstance(value, type(self)) and key in self:\n+ type(self).update(self[key], value)\n+ else:\n+ self[key] = new_data[key]\n+\n+\n def load_yaml(filename=None):\n # Find the requested yaml in the config dir, relative to this file's location\n # (aiming for cfme_tests/config)\n@@ -70,7 +109,7 @@\n \n if path.check():\n with path.open() as config_fh:\n- return yaml.load(config_fh, Loader=OrderedYamlLoader)\n+ return yaml.load(config_fh, Loader=YamlConfigLoader)\n else:\n msg = 'Unable to load configuration file at %s' % path\n raise ConfigNotFoundException(msg)\n", "issue": "Better YAML overriding\nNow it does not take just the root element into the account, but it crawls throught the dictionary and only updates the values that are present in the new dictionary. It converts all dicts to Configs, other values than specified in override dict are not touched.\n\nIt also improves the `__getattribute__` behaviour - now it propagates the interface to the child nodes by converting all `dict` to `Config` before returning the value, so the dot operator can be used everywhere.\n\n", "before_files": [{"content": "import os\nfrom collections import OrderedDict\n\nimport py.path\nimport yaml\nfrom yaml.loader import Loader\n\n\nclass OrderedYamlLoader(Loader):\n def construct_yaml_map(self, node):\n data = OrderedDict()\n yield data\n value = self.construct_mapping(node)\n data.update(value)\n\n\nclass ConfigNotFoundException(Exception):\n pass\n\n\nclass Config(dict):\n \"\"\"A dict subclass with knowledge of conf yamls and how to load them\n\n Also supports descriptor access, e.g. conf.configfile\n (compared to the normal dict access, conf['configfile'])\n \"\"\"\n # Stash the exception on the class for convenience, e.g.\n # try:\n # conf[does_not_exist]\n # except conf.NotFoundException\n # ...\n NotFoundException = ConfigNotFoundException\n\n # Support for descriptor access, e.g. 
instance.attrname\n # Note that this is only on the get side, for support of nefarious things\n # like setting and deleting, use the normal dict interface.\n def __getattribute__(self, attr):\n # Attempt normal object attr lookup; delegate to the dict interface if that fails\n try:\n return super(Config, self).__getattribute__(attr)\n except AttributeError:\n return self[attr]\n\n def __getitem__(self, key):\n # Attempt a normal dict lookup to pull a cached conf\n try:\n return super(Config, self).__getitem__(key)\n except KeyError:\n # Cache miss, load the requested yaml\n yaml_dict = load_yaml(key)\n\n # Graft in local yaml updates if they're available\n try:\n local_yaml = '%s.local' % key\n local_yaml_dict = load_yaml(local_yaml)\n yaml_dict.update(local_yaml_dict)\n except ConfigNotFoundException:\n pass\n\n # Returning self[key] instead of yaml_dict as a small sanity check\n self[key] = yaml_dict\n return self[key]\n\n\ndef load_yaml(filename=None):\n # Find the requested yaml in the config dir, relative to this file's location\n # (aiming for cfme_tests/config)\n this_file = os.path.abspath(__file__)\n path = py.path.local(this_file).new(basename='../conf/%s.yaml' % filename)\n\n if path.check():\n with path.open() as config_fh:\n return yaml.load(config_fh, Loader=OrderedYamlLoader)\n else:\n msg = 'Unable to load configuration file at %s' % path\n raise ConfigNotFoundException(msg)\n", "path": "utils/conf_loader.py"}]} | 1,318 | 564 |
gh_patches_debug_20213 | rasdani/github-patches | git_diff | ray-project__ray-1523 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[rllib] [docs] Document multi-agent support
We should document the new multi-agent support in rllib and have some examples in readthedocs. It would be good to cover the supported cases and which ones are not yet supported (or provide workarounds).
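
For illustration, the per-agent space pattern the example environments below rely on, assuming the legacy `gym` API this repo targets (a list of per-agent action spaces plus a `Tuple` observation space):

```python
import numpy as np
from gym.spaces import Box, Discrete, Tuple

n_agents = 2
# One action space per agent, and a joint observation space built from
# one Box per agent -- the layout used by the multi-agent examples below.
action_space = [Discrete(3) for _ in range(n_agents)]
high = np.array([1.0, 1.0, 8.0])
observation_space = Tuple([Box(low=-high, high=high) for _ in range(n_agents)])
```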
</issue>
<code>
[start of python/ray/rllib/examples/multiagent_pendulum_env.py]
1 from gym.spaces import Box, Tuple
2 from gym.utils import seeding
3 from gym.envs.classic_control.pendulum import PendulumEnv
4 import numpy as np
5
6 """
7 Multiagent pendulum that sums its torques to generate an action
8 """
9
10
11 class MultiAgentPendulumEnv(PendulumEnv):
12 metadata = {
13 'render.modes': ['human', 'rgb_array'],
14 'video.frames_per_second': 30
15 }
16
17 def __init__(self):
18 self.max_speed = 8
19 self.max_torque = 2.
20 self.dt = .05
21 self.viewer = None
22
23 high = np.array([1., 1., self.max_speed])
24 self.action_space = [Box(low=-self.max_torque / 2,
25 high=self.max_torque / 2, shape=(1,))
26 for _ in range(2)]
27 self.observation_space = Tuple(tuple(Box(low=-high, high=high)
28 for _ in range(2)))
29
30 self._seed()
31
32 def _seed(self, seed=None):
33 self.np_random, seed = seeding.np_random(seed)
34 return [seed]
35
36 def _step(self, u):
37 th, thdot = self.state # th := theta
38
39 summed_u = np.sum(u)
40 g = 10.
41 m = 1.
42 length = 1.
43 dt = self.dt
44
45 summed_u = np.clip(summed_u, -self.max_torque, self.max_torque)
46 self.last_u = summed_u # for rendering
47 costs = self.angle_normalize(th) ** 2 + .1 * thdot ** 2 + \
48 .001 * (summed_u ** 2)
49
50 newthdot = thdot + (-3 * g / (2 * length) * np.sin(th + np.pi) +
51 3. / (m * length ** 2) * summed_u) * dt
52 newth = th + newthdot * dt
53 newthdot = np.clip(newthdot, -self.max_speed, self.max_speed)
54
55 self.state = np.array([newth, newthdot])
56 return self._get_obs(), -costs, False, {}
57
58 def _reset(self):
59 high = np.array([np.pi, 1])
60 self.state = self.np_random.uniform(low=-high, high=high)
61 self.last_u = None
62 return self._get_obs()
63
64 def _get_obs(self):
65 theta, thetadot = self.state
66 return [np.array([np.cos(theta), np.sin(theta), thetadot])
67 for _ in range(2)]
68
69 def angle_normalize(self, x):
70 return (((x + np.pi) % (2 * np.pi)) - np.pi)
71
[end of python/ray/rllib/examples/multiagent_pendulum_env.py]
[start of python/ray/rllib/examples/multiagent_mountaincar_env.py]
1 import math
2 from gym.spaces import Box, Tuple, Discrete
3 import numpy as np
4 from gym.envs.classic_control.mountain_car import MountainCarEnv
5
6 """
7 Multiagent mountain car that sums and then
8 averages its actions to produce the velocity
9 """
10
11
12 class MultiAgentMountainCarEnv(MountainCarEnv):
13 def __init__(self):
14 self.min_position = -1.2
15 self.max_position = 0.6
16 self.max_speed = 0.07
17 self.goal_position = 0.5
18
19 self.low = np.array([self.min_position, -self.max_speed])
20 self.high = np.array([self.max_position, self.max_speed])
21
22 self.viewer = None
23
24 self.action_space = [Discrete(3) for _ in range(2)]
25 self.observation_space = Tuple(tuple(Box(self.low, self.high)
26 for _ in range(2)))
27
28 self._seed()
29 self.reset()
30
31 def _step(self, action):
32 summed_act = 0.5 * np.sum(action)
33
34 position, velocity = self.state
35 velocity += (summed_act - 1) * 0.001
36 velocity += math.cos(3 * position) * (-0.0025)
37 velocity = np.clip(velocity, -self.max_speed, self.max_speed)
38 position += velocity
39 position = np.clip(position, self.min_position, self.max_position)
40 if (position == self.min_position and velocity < 0):
41 velocity = 0
42
43 done = bool(position >= self.goal_position)
44
45 reward = position
46
47 self.state = (position, velocity)
48 return [np.array(self.state) for _ in range(2)], reward, done, {}
49
50 def _reset(self):
51 self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])
52 return [np.array(self.state) for _ in range(2)]
53
[end of python/ray/rllib/examples/multiagent_mountaincar_env.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/ray/rllib/examples/multiagent_mountaincar_env.py b/python/ray/rllib/examples/multiagent_mountaincar_env.py
--- a/python/ray/rllib/examples/multiagent_mountaincar_env.py
+++ b/python/ray/rllib/examples/multiagent_mountaincar_env.py
@@ -22,8 +22,8 @@
self.viewer = None
self.action_space = [Discrete(3) for _ in range(2)]
- self.observation_space = Tuple(tuple(Box(self.low, self.high)
- for _ in range(2)))
+ self.observation_space = Tuple([
+ Box(self.low, self.high) for _ in range(2)])
self._seed()
self.reset()
diff --git a/python/ray/rllib/examples/multiagent_pendulum_env.py b/python/ray/rllib/examples/multiagent_pendulum_env.py
--- a/python/ray/rllib/examples/multiagent_pendulum_env.py
+++ b/python/ray/rllib/examples/multiagent_pendulum_env.py
@@ -24,8 +24,8 @@
self.action_space = [Box(low=-self.max_torque / 2,
high=self.max_torque / 2, shape=(1,))
for _ in range(2)]
- self.observation_space = Tuple(tuple(Box(low=-high, high=high)
- for _ in range(2)))
+ self.observation_space = Tuple([
+ Box(low=-high, high=high) for _ in range(2)])
self._seed()
| {"golden_diff": "diff --git a/python/ray/rllib/examples/multiagent_mountaincar_env.py b/python/ray/rllib/examples/multiagent_mountaincar_env.py\n--- a/python/ray/rllib/examples/multiagent_mountaincar_env.py\n+++ b/python/ray/rllib/examples/multiagent_mountaincar_env.py\n@@ -22,8 +22,8 @@\n self.viewer = None\n \n self.action_space = [Discrete(3) for _ in range(2)]\n- self.observation_space = Tuple(tuple(Box(self.low, self.high)\n- for _ in range(2)))\n+ self.observation_space = Tuple([\n+ Box(self.low, self.high) for _ in range(2)])\n \n self._seed()\n self.reset()\ndiff --git a/python/ray/rllib/examples/multiagent_pendulum_env.py b/python/ray/rllib/examples/multiagent_pendulum_env.py\n--- a/python/ray/rllib/examples/multiagent_pendulum_env.py\n+++ b/python/ray/rllib/examples/multiagent_pendulum_env.py\n@@ -24,8 +24,8 @@\n self.action_space = [Box(low=-self.max_torque / 2,\n high=self.max_torque / 2, shape=(1,))\n for _ in range(2)]\n- self.observation_space = Tuple(tuple(Box(low=-high, high=high)\n- for _ in range(2)))\n+ self.observation_space = Tuple([\n+ Box(low=-high, high=high) for _ in range(2)])\n \n self._seed()\n", "issue": "[rllib] [docs] Document multi-agent support\nWe should document the new multi-agent support in rllib and have some examples in readthedocs. It would be good to cover the supported cases and which ones are not yet supported (or provide workarounds).\n", "before_files": [{"content": "from gym.spaces import Box, Tuple\nfrom gym.utils import seeding\nfrom gym.envs.classic_control.pendulum import PendulumEnv\nimport numpy as np\n\n\"\"\"\n Multiagent pendulum that sums its torques to generate an action\n\"\"\"\n\n\nclass MultiAgentPendulumEnv(PendulumEnv):\n metadata = {\n 'render.modes': ['human', 'rgb_array'],\n 'video.frames_per_second': 30\n }\n\n def __init__(self):\n self.max_speed = 8\n self.max_torque = 2.\n self.dt = .05\n self.viewer = None\n\n high = np.array([1., 1., self.max_speed])\n self.action_space = [Box(low=-self.max_torque / 2,\n high=self.max_torque / 2, shape=(1,))\n for _ in range(2)]\n self.observation_space = Tuple(tuple(Box(low=-high, high=high)\n for _ in range(2)))\n\n self._seed()\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _step(self, u):\n th, thdot = self.state # th := theta\n\n summed_u = np.sum(u)\n g = 10.\n m = 1.\n length = 1.\n dt = self.dt\n\n summed_u = np.clip(summed_u, -self.max_torque, self.max_torque)\n self.last_u = summed_u # for rendering\n costs = self.angle_normalize(th) ** 2 + .1 * thdot ** 2 + \\\n .001 * (summed_u ** 2)\n\n newthdot = thdot + (-3 * g / (2 * length) * np.sin(th + np.pi) +\n 3. 
/ (m * length ** 2) * summed_u) * dt\n newth = th + newthdot * dt\n newthdot = np.clip(newthdot, -self.max_speed, self.max_speed)\n\n self.state = np.array([newth, newthdot])\n return self._get_obs(), -costs, False, {}\n\n def _reset(self):\n high = np.array([np.pi, 1])\n self.state = self.np_random.uniform(low=-high, high=high)\n self.last_u = None\n return self._get_obs()\n\n def _get_obs(self):\n theta, thetadot = self.state\n return [np.array([np.cos(theta), np.sin(theta), thetadot])\n for _ in range(2)]\n\n def angle_normalize(self, x):\n return (((x + np.pi) % (2 * np.pi)) - np.pi)\n", "path": "python/ray/rllib/examples/multiagent_pendulum_env.py"}, {"content": "import math\nfrom gym.spaces import Box, Tuple, Discrete\nimport numpy as np\nfrom gym.envs.classic_control.mountain_car import MountainCarEnv\n\n\"\"\"\nMultiagent mountain car that sums and then\naverages its actions to produce the velocity\n\"\"\"\n\n\nclass MultiAgentMountainCarEnv(MountainCarEnv):\n def __init__(self):\n self.min_position = -1.2\n self.max_position = 0.6\n self.max_speed = 0.07\n self.goal_position = 0.5\n\n self.low = np.array([self.min_position, -self.max_speed])\n self.high = np.array([self.max_position, self.max_speed])\n\n self.viewer = None\n\n self.action_space = [Discrete(3) for _ in range(2)]\n self.observation_space = Tuple(tuple(Box(self.low, self.high)\n for _ in range(2)))\n\n self._seed()\n self.reset()\n\n def _step(self, action):\n summed_act = 0.5 * np.sum(action)\n\n position, velocity = self.state\n velocity += (summed_act - 1) * 0.001\n velocity += math.cos(3 * position) * (-0.0025)\n velocity = np.clip(velocity, -self.max_speed, self.max_speed)\n position += velocity\n position = np.clip(position, self.min_position, self.max_position)\n if (position == self.min_position and velocity < 0):\n velocity = 0\n\n done = bool(position >= self.goal_position)\n\n reward = position\n\n self.state = (position, velocity)\n return [np.array(self.state) for _ in range(2)], reward, done, {}\n\n def _reset(self):\n self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])\n return [np.array(self.state) for _ in range(2)]\n", "path": "python/ray/rllib/examples/multiagent_mountaincar_env.py"}]} | 1,913 | 344 |
gh_patches_debug_2086 | rasdani/github-patches | git_diff | google__timesketch-90 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Importing of JSON timelines creates duplicate timelines with the same name.
Steps to reproduce
1) command line:
echo '[
{
"datetime": "2012-04-12T17:24:38-08:00",
"timestamp_desc": "Test",
"timestamp": 1334251478000000,
"message": "Test message"
}
]' > test_dupe.json
tsctl json2ts --name test_dupe --file test_dupe.json
tsctl json2ts --name test_dupe --file test_dupe.json
2) Create new sketch
3) Notice duplicate "test_dupe" timelines on list to select from.
4) Add both
5) Explore, using "*" as filter.
6) Notice duplicate results (a hypothetical guard is sketched below).
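
For illustration, a hypothetical import-time guard (none of these names are Timesketch APIs) that would reject a duplicate timeline name instead of silently creating a second timeline:

```python
def register_timeline(existing_names, name):
    # Hypothetical check an importer like `tsctl json2ts` could run before
    # indexing: refuse duplicates so a sketch never lists two timelines
    # with the same name.
    if name in existing_names:
        raise ValueError(
            'timeline %r already exists; pass a new --name or delete it' % name)
    existing_names.add(name)
    return name
```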
</issue>
<code>
[start of wsgi.py]
1 #!/usr/bin/env python
2 # Copyright 2015 Google Inc. All rights reserved.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """This module is for creating the app for a WSGI server.
16
17 Example with Gunicorn:
18 $ gunicorn -b 127.0.0.1:4000 --log-file - wsgi:application
19
20 Example configuration for Apache with mod_wsgi (a2enmod mod_wsgi):
21 <VirtualHost *:443>
22 ServerAdmin root@localhost
23 SSLEngine On
24 SSLCertificateFile /etc/apache2/cert.crt
25 SSLCertificateKeyFile /etc/apache2/cert.key
26 WSGIScriptAlias / /path/to/this/file/wsgi.py
27 </VirtualHost>
28 """
29
30 # If you installed Timesketch in a virtualenv you need to activate it.
31 # This needs to be before any imports in order to import from the virtualenv.
32 #activate_virtualenv = '/path/to/your/virtualenv/bin/activate_this.py'
33 #execfile(activate_virtualenv, dict(__file__=activate_virtualenv))
34
35 from timesketch import create_app
36 from timesketch.models import db_session
37
38 application = create_app()
39
40 # Remove the session after every request or app shutdown.
41 @application.teardown_appcontext
42 def shutdown_session(exception=None):
43 db_session.remove()
44
[end of wsgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wsgi.py b/wsgi.py
--- a/wsgi.py
+++ b/wsgi.py
@@ -37,7 +37,8 @@
application = create_app()
-# Remove the session after every request or app shutdown.
+# pylint: disable=unused-argument
@application.teardown_appcontext
def shutdown_session(exception=None):
+ """Remove the database session after every request or app shutdown."""
db_session.remove()
| {"golden_diff": "diff --git a/wsgi.py b/wsgi.py\n--- a/wsgi.py\n+++ b/wsgi.py\n@@ -37,7 +37,8 @@\n \n application = create_app()\n \n-# Remove the session after every request or app shutdown.\n+# pylint: disable=unused-argument\n @application.teardown_appcontext\n def shutdown_session(exception=None):\n+ \"\"\"Remove the database session after every request or app shutdown.\"\"\"\n db_session.remove()\n", "issue": "Importing of JSON timelines creates duplicate timelines with same name.\nSteps to reproduce\n1) command line:\necho '[\n {\n \"datetime\": \"2012-04-12T17:24:38-08:00\",\n \"timestamp_desc\": \"Test\",\n \"timestamp\": 1334251478000000,\n \"message\": \"Test message\"\n }\n]' > test_dupe.json \ntsctl json2ts --name test_dupe --file test_dupe.json\ntsctl json2ts --name test_dupe --file test_dupe.json\n\n2) Create new sketch\n3) Notice duplicate \"test_dupe\" timelines on list to select from.\n4) Add both\n5) Explore, using \"*\" as filter.\n6) notice duplicate results.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module is for creating the app for a WSGI server.\n\nExample with Gunicorn:\n$ gunicorn -b 127.0.0.1:4000 --log-file - wsgi:application\n\nExample configuration for Apache with mod_wsgi (a2enmod mod_wsgi):\n<VirtualHost *:443>\n ServerAdmin root@localhost\n SSLEngine On\n SSLCertificateFile /etc/apache2/cert.crt\n SSLCertificateKeyFile /etc/apache2/cert.key\n WSGIScriptAlias / /path/to/this/file/wsgi.py\n</VirtualHost>\n\"\"\"\n\n# If you installed Timesketch in a virtualenv you need to activate it.\n# This needs to be before any imports in order to import from the virtualenv.\n#activate_virtualenv = '/path/to/your/virtualenv/bin/activate_this.py'\n#execfile(activate_virtualenv, dict(__file__=activate_virtualenv))\n\nfrom timesketch import create_app\nfrom timesketch.models import db_session\n\napplication = create_app()\n\n# Remove the session after every request or app shutdown.\[email protected]_appcontext\ndef shutdown_session(exception=None):\n db_session.remove()\n", "path": "wsgi.py"}]} | 1,195 | 97 |
gh_patches_debug_11147 | rasdani/github-patches | git_diff | dask__dask-10113 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Removal of dask.compatibility.entry_points has broken dask-kubernetes
It looks like `dask.compatibility.entry_points` was removed in #10070 without warning. This was being used in `dask-kubernetes`, so CI is now failing.
cc @graingert @jrbourbeau
</issue>
<code>
[start of dask/compatibility.py]
1 import sys
2
3 from packaging.version import parse as parse_version
4
5 _PY_VERSION = parse_version(".".join(map(str, sys.version_info[:3])))
6
7 _EMSCRIPTEN = sys.platform == "emscripten"
8
[end of dask/compatibility.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dask/compatibility.py b/dask/compatibility.py
--- a/dask/compatibility.py
+++ b/dask/compatibility.py
@@ -1,7 +1,19 @@
import sys
+import warnings
+from importlib_metadata import entry_points as _entry_points
from packaging.version import parse as parse_version
_PY_VERSION = parse_version(".".join(map(str, sys.version_info[:3])))
_EMSCRIPTEN = sys.platform == "emscripten"
+
+
+def entry_points(group=None):
+ warnings.warn(
+ "`dask.compatibility.entry_points` has been replaced by `importlib_metadata.entry_points` and will be removed "
+ "in a future version. Please use `importlib_metadata.entry_points` instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _entry_points(group=group)
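With the shim in place, a rough usage sketch (the entry-point group name below is invented for illustration) shows that callers such as dask-kubernetes keep working while being nudged toward `importlib_metadata`:

```python
import warnings

from dask.compatibility import entry_points  # the deprecated shim added above

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    eps = entry_points(group="dask_cli")  # hypothetical group name

# The call still returns entry points, but now emits a DeprecationWarning.
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```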
| {"golden_diff": "diff --git a/dask/compatibility.py b/dask/compatibility.py\n--- a/dask/compatibility.py\n+++ b/dask/compatibility.py\n@@ -1,7 +1,19 @@\n import sys\n+import warnings\n \n+from importlib_metadata import entry_points as _entry_points\n from packaging.version import parse as parse_version\n \n _PY_VERSION = parse_version(\".\".join(map(str, sys.version_info[:3])))\n \n _EMSCRIPTEN = sys.platform == \"emscripten\"\n+\n+\n+def entry_points(group=None):\n+ warnings.warn(\n+ \"`dask.compatibility.entry_points` has been replaced by `importlib_metadata.entry_points` and will be removed \"\n+ \"in a future version. Please use `importlib_metadata.entry_points` instead.\",\n+ DeprecationWarning,\n+ stacklevel=2,\n+ )\n+ return _entry_points(group=group)\n", "issue": "Removal of dask.compatibility.entry_points has broken dask-kubernetes\nIt looks like `dask.compatibility.entry_points` was removed in #10070 without warning. This was being used in `dask-kubernetes` so CI is now failing.\r\n\r\nhttps://github.com/dask/dask-kubernetes/actions/runs/4499027159/jobs/7916366189?pr=683\r\n\r\ncc @graingert @jrbourbeau \n", "before_files": [{"content": "import sys\n\nfrom packaging.version import parse as parse_version\n\n_PY_VERSION = parse_version(\".\".join(map(str, sys.version_info[:3])))\n\n_EMSCRIPTEN = sys.platform == \"emscripten\"\n", "path": "dask/compatibility.py"}]} | 704 | 195 |
gh_patches_debug_2563 | rasdani/github-patches | git_diff | microsoft__ptvsd-297 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to launch the debugger
Getting the following error in master when debugging in VSC:
```
Could not connect to None: 60857
Traceback (most recent call last):
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/pydevd/pydevd.py", line 1620, in main
debugger.connect(host, port)
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/pydevd/pydevd.py", line 326, in connect
s = start_server(port)
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/wrapper.py", line 1766, in start_server
server = _create_server(port)
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/wrapper.py", line 1701, in _create_server
server.bind(('127.0.0.1', port))
OSError: [Errno 48] Address already in use
```
</issue>
<code>
[start of ptvsd/debugger.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 from ptvsd.__main__ import run_module, run_file
6
7
8 __author__ = "Microsoft Corporation <[email protected]>"
9 __version__ = "4.0.0a5"
10
11 # TODO: not needed?
12 DONT_DEBUG = []
13
14
15 def debug(filename, port_num, debug_id, debug_options, run_as, **kwargs):
16 # TODO: docstring
17 address = (None, port_num)
18 if run_as == 'module':
19 run_module(address, filename, **kwargs)
20 else:
21 run_file(address, filename, **kwargs)
22
[end of ptvsd/debugger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py
--- a/ptvsd/debugger.py
+++ b/ptvsd/debugger.py
@@ -14,7 +14,7 @@
def debug(filename, port_num, debug_id, debug_options, run_as, **kwargs):
# TODO: docstring
- address = (None, port_num)
+ address = ('localhost', port_num)
if run_as == 'module':
run_module(address, filename, **kwargs)
else:
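A hedged sketch of the before/after behavior, with a stand-in for `run_module` since ptvsd's internals are out of scope here:

```python
def run_module(address, filename, **kwargs):  # stand-in, not the real ptvsd function
    host, port = address
    print("serving {} as a module on {}:{}".format(filename, host, port))

def debug(filename, port_num, debug_id, debug_options, run_as, **kwargs):
    address = ("localhost", port_num)  # was (None, port_num), hence "connect to None"
    if run_as == "module":
        run_module(address, filename, **kwargs)

debug("myscript", 60857, "debug_id", "", "module")
# serving myscript as a module on localhost:60857
```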
| {"golden_diff": "diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py\n--- a/ptvsd/debugger.py\n+++ b/ptvsd/debugger.py\n@@ -14,7 +14,7 @@\n \n def debug(filename, port_num, debug_id, debug_options, run_as, **kwargs):\n # TODO: docstring\n- address = (None, port_num)\n+ address = ('localhost', port_num)\n if run_as == 'module':\n run_module(address, filename, **kwargs)\n else:\n", "issue": "Unable to launch the debugger\nGetting the following error in master when debugging in VSC:\r\n```\r\nCould not connect to None: 60857\r\nTraceback (most recent call last):\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/pydevd/pydevd.py\", line 1620, in main\r\n debugger.connect(host, port)\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/pydevd/pydevd.py\", line 326, in connect\r\n s = start_server(port)\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/wrapper.py\", line 1766, in start_server\r\n server = _create_server(port)\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/wrapper.py\", line 1701, in _create_server\r\n server.bind(('127.0.0.1', port))\r\nOSError: [Errno 48] Address already in u\r\n```\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom ptvsd.__main__ import run_module, run_file\n\n\n__author__ = \"Microsoft Corporation <[email protected]>\"\n__version__ = \"4.0.0a5\"\n\n# TODO: not needed?\nDONT_DEBUG = []\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as, **kwargs):\n # TODO: docstring\n address = (None, port_num)\n if run_as == 'module':\n run_module(address, filename, **kwargs)\n else:\n run_file(address, filename, **kwargs)\n", "path": "ptvsd/debugger.py"}]} | 984 | 121 |
gh_patches_debug_31566 | rasdani/github-patches | git_diff | getsentry__sentry-python-141 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Log more extra data for Celery
The old integration in celery used to log arguments to the task and more. Add that to our celery integration.
</issue>
<code>
[start of sentry_sdk/integrations/celery.py]
1 from __future__ import absolute_import
2
3 import sys
4
5 from celery.signals import task_failure, task_prerun, task_postrun
6 from celery.exceptions import SoftTimeLimitExceeded
7
8 from sentry_sdk.hub import Hub
9 from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
10 from sentry_sdk.integrations import Integration
11 from sentry_sdk.integrations.logging import ignore_logger
12
13
14 class CeleryIntegration(Integration):
15 identifier = "celery"
16
17 @staticmethod
18 def setup_once():
19 task_prerun.connect(_handle_task_prerun, weak=False)
20 task_postrun.connect(_handle_task_postrun, weak=False)
21 task_failure.connect(_process_failure_signal, weak=False)
22
23 # This logger logs every status of every task that ran on the worker.
24 # Meaning that every task's breadcrumbs are full of stuff like "Task
25 # <foo> raised unexpected <bar>".
26 ignore_logger("celery.worker.job")
27
28
29 def _process_failure_signal(sender, task_id, einfo, **kw):
30 # einfo from celery is not reliable
31 exc_info = sys.exc_info()
32
33 hub = Hub.current
34 integration = hub.get_integration(CeleryIntegration)
35 if integration is None:
36 return
37
38 if hasattr(sender, "throws") and isinstance(einfo.exception, sender.throws):
39 return
40
41 if isinstance(einfo.exception, SoftTimeLimitExceeded):
42 # TODO: Move this into event processor
43 with hub.push_scope() as scope:
44 scope.fingerprint = [
45 "celery",
46 "SoftTimeLimitExceeded",
47 getattr(sender, "name", sender),
48 ]
49 _capture_event(hub, exc_info)
50 else:
51 _capture_event(hub, exc_info)
52
53
54 def _handle_task_prerun(sender, task, **kw):
55 hub = Hub.current
56 if hub.get_integration(CeleryIntegration) is not None:
57 scope = hub.push_scope().__enter__()
58 with capture_internal_exceptions():
59 scope.transaction = task.name
60
61
62 def _handle_task_postrun(sender, task_id, task, **kw):
63 hub = Hub.current
64 if hub.get_integration(CeleryIntegration) is not None:
65 hub.pop_scope_unsafe()
66
67
68 def _capture_event(hub, exc_info):
69 event, hint = event_from_exception(
70 exc_info,
71 client_options=hub.client.options,
72 mechanism={"type": "celery", "handled": False},
73 )
74 hub.capture_event(event, hint=hint)
75
[end of sentry_sdk/integrations/celery.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py
--- a/sentry_sdk/integrations/celery.py
+++ b/sentry_sdk/integrations/celery.py
@@ -35,28 +35,48 @@
if integration is None:
return
- if hasattr(sender, "throws") and isinstance(einfo.exception, sender.throws):
- return
-
- if isinstance(einfo.exception, SoftTimeLimitExceeded):
- # TODO: Move this into event processor
- with hub.push_scope() as scope:
- scope.fingerprint = [
- "celery",
- "SoftTimeLimitExceeded",
- getattr(sender, "name", sender),
- ]
- _capture_event(hub, exc_info)
- else:
- _capture_event(hub, exc_info)
+ _capture_event(hub, exc_info)
-def _handle_task_prerun(sender, task, **kw):
+def _handle_task_prerun(sender, task, args, kwargs, **_):
hub = Hub.current
if hub.get_integration(CeleryIntegration) is not None:
scope = hub.push_scope().__enter__()
+ scope.add_event_processor(_make_event_processor(args, kwargs, task))
+
+
+def _make_event_processor(args, kwargs, task):
+ def event_processor(event, hint):
+ with capture_internal_exceptions():
+ if "transaction" not in event:
+ event["transaction"] = task.name
+
with capture_internal_exceptions():
- scope.transaction = task.name
+ extra = event.setdefault("extra", {})
+ extra["celery-job"] = {
+ "task_name": task.name,
+ "args": args,
+ "kwargs": kwargs,
+ }
+
+ if "exc_info" in hint:
+ with capture_internal_exceptions():
+ if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
+ event["fingerprint"] = [
+ "celery",
+ "SoftTimeLimitExceeded",
+ getattr(task, "name", task),
+ ]
+
+ with capture_internal_exceptions():
+ if hasattr(task, "throws") and isinstance(
+ hint["exc_info"][1], task.throws
+ ):
+ return None
+
+ return event
+
+ return event_processor
def _handle_task_postrun(sender, task_id, task, **kw):
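A self-contained sketch of the enrichment the new processor performs; plain dicts stand in for real Sentry events, and the task name and arguments are invented:

```python
def make_event_processor(args, kwargs, task_name):
    """Attach the Celery call context under extra["celery-job"], as the patch does."""
    def event_processor(event, hint):
        extra = event.setdefault("extra", {})
        extra["celery-job"] = {
            "task_name": task_name,
            "args": args,
            "kwargs": kwargs,
        }
        return event
    return event_processor

processor = make_event_processor((1, 2), {"retry": True}, "app.tasks.add")
print(processor({}, hint={}))
# {'extra': {'celery-job': {'task_name': 'app.tasks.add', 'args': (1, 2), 'kwargs': {'retry': True}}}}
```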
| {"golden_diff": "diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py\n--- a/sentry_sdk/integrations/celery.py\n+++ b/sentry_sdk/integrations/celery.py\n@@ -35,28 +35,48 @@\n if integration is None:\n return\n \n- if hasattr(sender, \"throws\") and isinstance(einfo.exception, sender.throws):\n- return\n-\n- if isinstance(einfo.exception, SoftTimeLimitExceeded):\n- # TODO: Move this into event processor\n- with hub.push_scope() as scope:\n- scope.fingerprint = [\n- \"celery\",\n- \"SoftTimeLimitExceeded\",\n- getattr(sender, \"name\", sender),\n- ]\n- _capture_event(hub, exc_info)\n- else:\n- _capture_event(hub, exc_info)\n+ _capture_event(hub, exc_info)\n \n \n-def _handle_task_prerun(sender, task, **kw):\n+def _handle_task_prerun(sender, task, args, kwargs, **_):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is not None:\n scope = hub.push_scope().__enter__()\n+ scope.add_event_processor(_make_event_processor(args, kwargs, task))\n+\n+\n+def _make_event_processor(args, kwargs, task):\n+ def event_processor(event, hint):\n+ with capture_internal_exceptions():\n+ if \"transaction\" not in event:\n+ event[\"transaction\"] = task.name\n+\n with capture_internal_exceptions():\n- scope.transaction = task.name\n+ extra = event.setdefault(\"extra\", {})\n+ extra[\"celery-job\"] = {\n+ \"task_name\": task.name,\n+ \"args\": args,\n+ \"kwargs\": kwargs,\n+ }\n+\n+ if \"exc_info\" in hint:\n+ with capture_internal_exceptions():\n+ if issubclass(hint[\"exc_info\"][0], SoftTimeLimitExceeded):\n+ event[\"fingerprint\"] = [\n+ \"celery\",\n+ \"SoftTimeLimitExceeded\",\n+ getattr(task, \"name\", task),\n+ ]\n+\n+ with capture_internal_exceptions():\n+ if hasattr(task, \"throws\") and isinstance(\n+ hint[\"exc_info\"][1], task.throws\n+ ):\n+ return None\n+\n+ return event\n+\n+ return event_processor\n \n \n def _handle_task_postrun(sender, task_id, task, **kw):\n", "issue": "Log more extra data for Celery\nThe old integration in celery used to log arguments to the task and more. 
Add that to our celery integration\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport sys\n\nfrom celery.signals import task_failure, task_prerun, task_postrun\nfrom celery.exceptions import SoftTimeLimitExceeded\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import capture_internal_exceptions, event_from_exception\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations.logging import ignore_logger\n\n\nclass CeleryIntegration(Integration):\n identifier = \"celery\"\n\n @staticmethod\n def setup_once():\n task_prerun.connect(_handle_task_prerun, weak=False)\n task_postrun.connect(_handle_task_postrun, weak=False)\n task_failure.connect(_process_failure_signal, weak=False)\n\n # This logger logs every status of every task that ran on the worker.\n # Meaning that every task's breadcrumbs are full of stuff like \"Task\n # <foo> raised unexpected <bar>\".\n ignore_logger(\"celery.worker.job\")\n\n\ndef _process_failure_signal(sender, task_id, einfo, **kw):\n # einfo from celery is not reliable\n exc_info = sys.exc_info()\n\n hub = Hub.current\n integration = hub.get_integration(CeleryIntegration)\n if integration is None:\n return\n\n if hasattr(sender, \"throws\") and isinstance(einfo.exception, sender.throws):\n return\n\n if isinstance(einfo.exception, SoftTimeLimitExceeded):\n # TODO: Move this into event processor\n with hub.push_scope() as scope:\n scope.fingerprint = [\n \"celery\",\n \"SoftTimeLimitExceeded\",\n getattr(sender, \"name\", sender),\n ]\n _capture_event(hub, exc_info)\n else:\n _capture_event(hub, exc_info)\n\n\ndef _handle_task_prerun(sender, task, **kw):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is not None:\n scope = hub.push_scope().__enter__()\n with capture_internal_exceptions():\n scope.transaction = task.name\n\n\ndef _handle_task_postrun(sender, task_id, task, **kw):\n hub = Hub.current\n if hub.get_integration(CeleryIntegration) is not None:\n hub.pop_scope_unsafe()\n\n\ndef _capture_event(hub, exc_info):\n event, hint = event_from_exception(\n exc_info,\n client_options=hub.client.options,\n mechanism={\"type\": \"celery\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n", "path": "sentry_sdk/integrations/celery.py"}]} | 1,260 | 543 |
gh_patches_debug_34657 | rasdani/github-patches | git_diff | pantsbuild__pants-14125 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ResolveError: Directory '{mydir}' does not contain any BUILD files (when Dockerizing packages)
**Describe the bug**
Created a repo at https://github.com/sureshjoshi/pantsbuild-14031 to help illustrate this problem.
Essentially, I use custom output paths for my .pex files, and while testing out the `docker_image` target, I noticed some of my components fail with the error
> ResolveError: Directory 'backend' does not contain any BUILD files
After a lot of debugging, I only ran into this problem when my output folders were common to multiple `pex_binary` targets.
For example, in the repo above, I have 3 identical projects (A, B, C) that differ only by the `pex_binary` `output_path` (with this location updated in the associated Dockerfile), and one of the projects refuses to compile.
As per the README in the repo:
```bash
# Should create a pex at dist/backend/projecta/projecta.pex
# Docker image created successfully as projecta-container:latest
./pants package backend/projecta::
# Should create a pex at dist/backend.projectc/projectc.pex
# Docker image created successfully as projectc-container:latest
./pants package backend/projectc::
```
```bash
# Should create a pex at dist/backend/projectb.pex
./pants package backend/projectb:projectb
# FAILS: With ResolveError
./pants package backend/projectb:projectb-container
```
So, the difference above is that Project C uses no `output_path` and uses the dot-syntax for the dist folder. ProjectA places the pex file under a `backend/projecta` directory. The failing ProjectB places the pex file directly under `backend`.
This isn't a big issue and is easily worked around; I'm guessing it has to do with namespacing or module/package semantics, but it's a weird problem that is difficult to debug from the error message.
**Pants version**
- 2.8.0
- 2.9.0rc1
**OS**
macOS 12.1
Untested on Linux
</issue>
<code>
[start of src/python/pants/backend/docker/util_rules/dependencies.py]
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest
5 from pants.backend.docker.target_types import DockerDependenciesField
6 from pants.core.goals.package import PackageFieldSet
7 from pants.engine.addresses import Addresses, UnparsedAddressInputs
8 from pants.engine.rules import Get, collect_rules, rule
9 from pants.engine.target import (
10 FieldSetsPerTarget,
11 FieldSetsPerTargetRequest,
12 InjectDependenciesRequest,
13 InjectedDependencies,
14 Targets,
15 )
16 from pants.engine.unions import UnionRule
17
18
19 class InjectDockerDependencies(InjectDependenciesRequest):
20 inject_for = DockerDependenciesField
21
22
23 @rule
24 async def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:
25 """Inspects COPY instructions in the Dockerfile for references to known targets."""
26 dockerfile_info = await Get(
27 DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)
28 )
29
30 targets = await Get(
31 Targets,
32 UnparsedAddressInputs(
33 dockerfile_info.putative_target_addresses,
34 owning_address=dockerfile_info.address,
35 ),
36 )
37 package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))
38 referenced_targets = (
39 field_sets[0].address for field_sets in package.collection if len(field_sets) > 0
40 )
41 return InjectedDependencies(Addresses(referenced_targets))
42
43
44 def rules():
45 return [
46 *collect_rules(),
47 UnionRule(InjectDependenciesRequest, InjectDockerDependencies),
48 ]
49
[end of src/python/pants/backend/docker/util_rules/dependencies.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/backend/docker/util_rules/dependencies.py b/src/python/pants/backend/docker/util_rules/dependencies.py
--- a/src/python/pants/backend/docker/util_rules/dependencies.py
+++ b/src/python/pants/backend/docker/util_rules/dependencies.py
@@ -3,6 +3,7 @@
from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest
from pants.backend.docker.target_types import DockerDependenciesField
+from pants.base.specs import AddressSpecs, MaybeEmptySiblingAddresses
from pants.core.goals.package import PackageFieldSet
from pants.engine.addresses import Addresses, UnparsedAddressInputs
from pants.engine.rules import Get, collect_rules, rule
@@ -22,18 +23,28 @@
@rule
async def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:
- """Inspects COPY instructions in the Dockerfile for references to known targets."""
+ """Inspects COPY instructions in the Dockerfile for references to known packagable targets."""
dockerfile_info = await Get(
DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)
)
- targets = await Get(
- Targets,
+ # Parse all putative target addresses.
+ putative_addresses = await Get(
+ Addresses,
UnparsedAddressInputs(
dockerfile_info.putative_target_addresses,
owning_address=dockerfile_info.address,
),
)
+
+ # Get the target for those addresses that are known.
+ directories = {address.spec_path for address in putative_addresses}
+ all_addresses = await Get(Addresses, AddressSpecs(map(MaybeEmptySiblingAddresses, directories)))
+ targets = await Get(
+ Targets, Addresses((address for address in putative_addresses if address in all_addresses))
+ )
+
+ # Only keep those targets that we can "package".
package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))
referenced_targets = (
field_sets[0].address for field_sets in package.collection if len(field_sets) > 0
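A hedged toy model of the added filtering step, with plain strings standing in for Pants `Address` objects: only putative addresses that also resolve from the sibling-address lookup survive, so a `COPY` of an artifact under a shared `dist/` prefix no longer demands BUILD files in that directory:

```python
putative_addresses = ["backend/projecta:bin", "backend:projectb"]
known_addresses = {"backend/projecta:bin"}  # resolved via MaybeEmptySiblingAddresses

kept = [addr for addr in putative_addresses if addr in known_addresses]
print(kept)  # ['backend/projecta:bin'] -- the unknown address is dropped silently
```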
| {"golden_diff": "diff --git a/src/python/pants/backend/docker/util_rules/dependencies.py b/src/python/pants/backend/docker/util_rules/dependencies.py\n--- a/src/python/pants/backend/docker/util_rules/dependencies.py\n+++ b/src/python/pants/backend/docker/util_rules/dependencies.py\n@@ -3,6 +3,7 @@\n \n from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest\n from pants.backend.docker.target_types import DockerDependenciesField\n+from pants.base.specs import AddressSpecs, MaybeEmptySiblingAddresses\n from pants.core.goals.package import PackageFieldSet\n from pants.engine.addresses import Addresses, UnparsedAddressInputs\n from pants.engine.rules import Get, collect_rules, rule\n@@ -22,18 +23,28 @@\n \n @rule\n async def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:\n- \"\"\"Inspects COPY instructions in the Dockerfile for references to known targets.\"\"\"\n+ \"\"\"Inspects COPY instructions in the Dockerfile for references to known packagable targets.\"\"\"\n dockerfile_info = await Get(\n DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)\n )\n \n- targets = await Get(\n- Targets,\n+ # Parse all putative target addresses.\n+ putative_addresses = await Get(\n+ Addresses,\n UnparsedAddressInputs(\n dockerfile_info.putative_target_addresses,\n owning_address=dockerfile_info.address,\n ),\n )\n+\n+ # Get the target for those addresses that are known.\n+ directories = {address.spec_path for address in putative_addresses}\n+ all_addresses = await Get(Addresses, AddressSpecs(map(MaybeEmptySiblingAddresses, directories)))\n+ targets = await Get(\n+ Targets, Addresses((address for address in putative_addresses if address in all_addresses))\n+ )\n+\n+ # Only keep those targets that we can \"package\".\n package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))\n referenced_targets = (\n field_sets[0].address for field_sets in package.collection if len(field_sets) > 0\n", "issue": "ResolveError: Directory '{mydir}' does not contain any BUILD files (when Dockerizing packages)\n**Describe the bug**\r\n\r\nCreated a repo at https://github.com/sureshjoshi/pantsbuild-14031 to help illustrate this problem. \r\n\r\nEssentially, I use custom output paths for my .pex files, and while testing out the `docker_image` target, I noticed some of my components fail with the error \r\n\r\n> ResolveError: Directory 'backend' does not contain any BUILD files\r\n\r\nAfter a lot of debugging, I only ran into this problem when my output folders were common to multiple `pex_binary` targets. 
\r\n\r\nFor example, in the repo above, I have 3 identical projects (A, B, C) - where they only differ by the `pex_binary` `output_path` (and this location updated in the associated Dockerfile), and one of the projects refuses to compile.\r\n\r\nAs per the README in the repo:\r\n\r\n```bash\r\n# Should create a pex at dist/backend/projecta/projecta.pex\r\n# Docker image created successfully as projecta-container:latest\r\n./pants package backend/projecta::\r\n\r\n# Should create a pex at dist/backend.projectc/projectc.pex\r\n# Docker image created successfully as projectc-container:latest\r\n./pants package backend/projectc::\r\n```\r\n\r\n```bash\r\n# Should create a pex at dist/backend/projectb.pex\r\n./pants package backend/projectb:projectb\r\n\r\n# FAILS: With ResolveError\r\n./pants package backend/projectb:projectb-container \r\n```\r\n\r\nSo, the difference above is that Project C uses no `output_path` and uses the dot-syntax for the dist folder. ProjectA places the pex file under a `backend/projecta` directory. The failing ProjectB places the pex file directly under `backend`.\r\n\r\nThis isn't a big issue, and easily worked around, and I'm guessing it has to do with namespacing or module/package semantics, but it's just a weird problem that is difficult to debug based on the error message.\r\n\r\n**Pants version**\r\n\r\n- 2.8.0\r\n- 2.9.0rc1\r\n\r\n**OS**\r\n\r\nmacOS 12.1\r\nUntested on Linux\r\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest\nfrom pants.backend.docker.target_types import DockerDependenciesField\nfrom pants.core.goals.package import PackageFieldSet\nfrom pants.engine.addresses import Addresses, UnparsedAddressInputs\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.engine.target import (\n FieldSetsPerTarget,\n FieldSetsPerTargetRequest,\n InjectDependenciesRequest,\n InjectedDependencies,\n Targets,\n)\nfrom pants.engine.unions import UnionRule\n\n\nclass InjectDockerDependencies(InjectDependenciesRequest):\n inject_for = DockerDependenciesField\n\n\n@rule\nasync def inject_docker_dependencies(request: InjectDockerDependencies) -> InjectedDependencies:\n \"\"\"Inspects COPY instructions in the Dockerfile for references to known targets.\"\"\"\n dockerfile_info = await Get(\n DockerfileInfo, DockerfileInfoRequest(request.dependencies_field.address)\n )\n\n targets = await Get(\n Targets,\n UnparsedAddressInputs(\n dockerfile_info.putative_target_addresses,\n owning_address=dockerfile_info.address,\n ),\n )\n package = await Get(FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, targets))\n referenced_targets = (\n field_sets[0].address for field_sets in package.collection if len(field_sets) > 0\n )\n return InjectedDependencies(Addresses(referenced_targets))\n\n\ndef rules():\n return [\n *collect_rules(),\n UnionRule(InjectDependenciesRequest, InjectDockerDependencies),\n ]\n", "path": "src/python/pants/backend/docker/util_rules/dependencies.py"}]} | 1,459 | 453 |
gh_patches_debug_1116 | rasdani/github-patches | git_diff | scikit-hep__pyhf-895 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs build broken with Sphinx v3.1.0
# Description
Today (2020-06-08) [Sphinx `v3.1.0`](https://github.com/sphinx-doc/sphinx/releases/tag/v3.1.0) was released, and it now classifies pyhf's particular usages of the "autoclass" directive as an error in the docs generated for [`interpolators/code0.py`](https://github.com/scikit-hep/pyhf/blob/62becc2e469f89babf75534a2decfb3ace6ff179/src/pyhf/interpolators/code0.py):
```
Warning, treated as error:
/home/runner/work/pyhf/pyhf/docs/_generated/pyhf.interpolators.code0.rst:8:Error in "autoclass" directive:
1 argument(s) required, 0 supplied.
.. autoclass::
:show-inheritance:
.. rubric:: Methods
.. automethod:: .__init__
##[error]Process completed with exit code 1.
```
</issue>
<code>
[start of setup.py]
1 from setuptools import setup
2
3 extras_require = {
4 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],
5 'torch': ['torch~=1.2'],
6 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
7 'xmlio': ['uproot'],
8 'minuit': ['iminuit'],
9 }
10 extras_require['backends'] = sorted(
11 set(
12 extras_require['tensorflow']
13 + extras_require['torch']
14 + extras_require['jax']
15 + extras_require['minuit']
16 )
17 )
18 extras_require['contrib'] = sorted(set(['matplotlib']))
19
20 extras_require['test'] = sorted(
21 set(
22 extras_require['backends']
23 + extras_require['xmlio']
24 + extras_require['contrib']
25 + [
26 'pyflakes',
27 'pytest~=3.5',
28 'pytest-cov>=2.5.1',
29 'pytest-mock',
30 'pytest-benchmark[histogram]',
31 'pytest-console-scripts',
32 'pytest-mpl',
33 'pydocstyle',
34 'coverage>=4.0', # coveralls
35 'papermill~=2.0',
36 'nteract-scrapbook~=0.2',
37 'check-manifest',
38 'jupyter',
39 'uproot~=3.3',
40 'graphviz',
41 'jsonpatch',
42 'black',
43 ]
44 )
45 )
46 extras_require['docs'] = sorted(
47 set(
48 [
49 'sphinx',
50 'sphinxcontrib-bibtex',
51 'sphinx-click',
52 'sphinx_rtd_theme',
53 'nbsphinx',
54 'ipywidgets',
55 'sphinx-issues',
56 'sphinx-copybutton>0.2.9',
57 ]
58 )
59 )
60 extras_require['develop'] = sorted(
61 set(
62 extras_require['docs']
63 + extras_require['test']
64 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']
65 )
66 )
67 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
68
69
70 setup(
71 extras_require=extras_require,
72 use_scm_version=lambda: {'local_scheme': lambda version: ''},
73 )
74
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@
extras_require['docs'] = sorted(
set(
[
- 'sphinx',
+ 'sphinx!=3.1.0',
'sphinxcontrib-bibtex',
'sphinx-click',
'sphinx_rtd_theme',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,7 +46,7 @@\n extras_require['docs'] = sorted(\n set(\n [\n- 'sphinx',\n+ 'sphinx!=3.1.0',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n", "issue": "Docs build broken with Sphinx v3.1.0\n# Description\r\n\r\nToday (2020-06-08) [Sphinx `v3.1.0`](https://github.com/sphinx-doc/sphinx/releases/tag/v3.1.0) was released which now classifies pyhf's particular usages of the \"autoclass\" directive as an Error in the docs generated for [`interpolators/code0.py`](https://github.com/scikit-hep/pyhf/blob/62becc2e469f89babf75534a2decfb3ace6ff179/src/pyhf/interpolators/code0.py)\r\n\r\n```\r\nWarning, treated as error:\r\n/home/runner/work/pyhf/pyhf/docs/_generated/pyhf.interpolators.code0.rst:8:Error in \"autoclass\" directive:\r\n1 argument(s) required, 0 supplied.\r\n\r\n.. autoclass::\r\n :show-inheritance:\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n .. rubric:: Methods\r\n\r\n\r\n\r\n .. automethod:: .__init__\r\n##[error]Process completed with exit code 1.\r\n```\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'tensorflow': ['tensorflow~=2.0', 'tensorflow-probability~=0.8'],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'check-manifest',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n 'black',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,400 | 87 |
gh_patches_debug_11412 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-3108 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The modprobe combiner is raising AttributeError exceptions in production.
The AllModProbe combiner is raising a number of AttributeError("'bool' object has no attribute 'append'",) exceptions in production.
</issue>
<code>
[start of insights/combiners/modprobe.py]
1 """
2 Modprobe configuration
3 ======================
4
5 The modprobe configuration files are normally available to rules as a list of
6 ModProbe objects. This combiner turns those into one set of data, preserving
7 the original file name that defined modprobe configuration line using a tuple.
8
9 """
10
11 from insights.core.plugins import combiner
12 from insights.parsers.modprobe import ModProbe
13 from .. import LegacyItemAccess
14
15 from collections import namedtuple
16
17
18 ModProbeValue = namedtuple("ModProbeValue", ['value', 'source'])
19 """
20 A value from a ModProbe source
21 """
22
23
24 @combiner(ModProbe)
25 class AllModProbe(LegacyItemAccess):
26 """
27 Combiner for accessing all the modprobe configuration files in one
28 structure.
29
30 It's important for our reporting and information purposes to know not
31 only what the configuration was but where it was defined. Therefore, the
32 format of the data in this combiner is slightly different compared to the
33 ModProbe parser. Here, each 'value' is actually a 2-tuple, with the
34 actual data first and the file name from whence the value came second.
35 This does mean that you need to pull the value out of each item - e.g.
36 using a list comprehension - but it means that every item is associated
37 with the file it was defined in.
38
39 In line with the ModProbe configuration parser, the actual value is
40 usually a list of the space-separated parts on the line, and the
41 definitions for each module are similarly kept in a list, which makes
42
43 Thanks to the LegacyItemAccess class, this can also be treated as a
44 dictionary for look-ups of data in the `data` attribute.
45
46 Attributes:
47 data (dict): The combined data structures, with each item as a
48 2-tuple, as described above.
49 bad_lines(list): The list of unparseable lines from all files, with
50 each line as a 2-tuple as described above.
51
52 Sample data files::
53
54 /etc/modprobe.conf:
55 # watchdog drivers
56 blacklist i8xx_tco
57
58 # Don't install the Firewire ethernet driver
59 install eth1394 /bin/true
60
61 /etc/modprobe.conf.d/no_ipv6.conf:
62 options ipv6 disable=1
63 install ipv6 /bin/true
64
65 Examples:
66 >>> all_modprobe = shared[AllModProbe]
67 >>> all_modprobe['alias']
68 []
69 >>> all_modprobe['blacklist']
70 {'i8xx_tco': ModProbeValue(True, '/etc/modprobe.conf')}
71 >>> all_modprobe['install']
72 {'eth1394': ModProbeValue(['/bin/true'], '/etc/modprobe.conf'),
73 'ipv6': ModProbeValue(['/bin/true'], '/etc/modprobe.conf.d/no_ipv6.conf')}
74 """
75 def __init__(self, modprobe):
76 self.data = {}
77 self.bad_lines = []
78 for mod in modprobe:
79 filename = mod.file_path # relative path inside archive
80 # Copy data section
81 for section, sectdict in mod.data.items():
82 if section not in self.data:
83 self.data[section] = {}
84 for name, value in sectdict.items():
85 if name in self.data[section]:
86 # append to this module's value - should only
87 # happen for aliases.
88 self.data[section][name][0].append(value)
89 else:
90 # create new tuple
91 self.data[section][name] = ModProbeValue(value=value, source=filename)
92 # Copy bad lines, if any
93 if mod.bad_lines:
94 self.bad_lines.extend(
95 [ModProbeValue(value=line, source=filename) for line in mod.bad_lines]
96 )
97 super(AllModProbe, self).__init__()
98
[end of insights/combiners/modprobe.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/insights/combiners/modprobe.py b/insights/combiners/modprobe.py
--- a/insights/combiners/modprobe.py
+++ b/insights/combiners/modprobe.py
@@ -82,7 +82,7 @@
if section not in self.data:
self.data[section] = {}
for name, value in sectdict.items():
- if name in self.data[section]:
+ if name in self.data[section] and type(self.data[section][name][0]) == list:
# append to this module's value - should only
# happen for aliases.
self.data[section][name][0].append(value)
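A minimal, hedged reproduction of the failure mode the guard fixes: `blacklist` lines parse to the bare value `True`, so a second file mentioning the same module used to hit `True.append(...)`; the type check leaves non-list values untouched:

```python
from collections import namedtuple

ModProbeValue = namedtuple("ModProbeValue", ["value", "source"])
data = {"blacklist": {"i8xx_tco": ModProbeValue(True, "/etc/modprobe.conf")}}

existing = data["blacklist"]["i8xx_tco"]
# Old behavior: existing[0].append(...) raised AttributeError on the bool.
if type(existing[0]) == list:  # the patched type guard
    existing[0].append("second definition")
# Non-list values (the True from a blacklist line) are now left untouched.
```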
| {"golden_diff": "diff --git a/insights/combiners/modprobe.py b/insights/combiners/modprobe.py\n--- a/insights/combiners/modprobe.py\n+++ b/insights/combiners/modprobe.py\n@@ -82,7 +82,7 @@\n if section not in self.data:\n self.data[section] = {}\n for name, value in sectdict.items():\n- if name in self.data[section]:\n+ if name in self.data[section] and type(self.data[section][name][0]) == list:\n # append to this module's value - should only\n # happen for aliases.\n self.data[section][name][0].append(value)\n", "issue": "The modprobe combiner is raising AttributeError exceptions in production.\nThe AllModProbe combiner is throwing a number of the exception AttributeError(\"'bool' object has no attribute 'append'\",) in production.\n", "before_files": [{"content": "\"\"\"\nModprobe configuration\n======================\n\nThe modprobe configuration files are normally available to rules as a list of\nModProbe objects. This combiner turns those into one set of data, preserving\nthe original file name that defined modprobe configuration line using a tuple.\n\n\"\"\"\n\nfrom insights.core.plugins import combiner\nfrom insights.parsers.modprobe import ModProbe\nfrom .. import LegacyItemAccess\n\nfrom collections import namedtuple\n\n\nModProbeValue = namedtuple(\"ModProbeValue\", ['value', 'source'])\n\"\"\"\nA value from a ModProbe source\n\"\"\"\n\n\n@combiner(ModProbe)\nclass AllModProbe(LegacyItemAccess):\n \"\"\"\n Combiner for accessing all the modprobe configuration files in one\n structure.\n\n It's important for our reporting and information purposes to know not\n only what the configuration was but where it was defined. Therefore, the\n format of the data in this combiner is slightly different compared to the\n ModProbe parser. Here, each 'value' is actually a 2-tuple, with the\n actual data first and the file name from whence the value came second.\n This does mean that you need to pull the value out of each item - e.g.\n using a list comprehension - but it means that every item is associated\n with the file it was defined in.\n\n In line with the ModProbe configuration parser, the actual value is\n usually a list of the space-separated parts on the line, and the\n definitions for each module are similarly kept in a list, which makes\n\n Thanks to the LegacyItemAccess class, this can also be treated as a\n dictionary for look-ups of data in the `data` attribute.\n\n Attributes:\n data (dict): The combined data structures, with each item as a\n 2-tuple, as described above.\n bad_lines(list): The list of unparseable lines from all files, with\n each line as a 2-tuple as described above.\n\n Sample data files::\n\n /etc/modprobe.conf:\n # watchdog drivers\n blacklist i8xx_tco\n\n # Don't install the Firewire ethernet driver\n install eth1394 /bin/true\n\n /etc/modprobe.conf.d/no_ipv6.conf:\n options ipv6 disable=1\n install ipv6 /bin/true\n\n Examples:\n >>> all_modprobe = shared[AllModProbe]\n >>> all_modprobe['alias']\n []\n >>> all_modprobe['blacklist']\n {'i8xx_tco': ModProbeValue(True, '/etc/modprobe.conf')}\n >>> all_modprobe['install']\n {'eth1394': ModProbeValue(['/bin/true'], '/etc/modprobe.conf'),\n 'ipv6': ModProbeValue(['/bin/true'], '/etc/modprobe.conf.d/no_ipv6.conf')}\n \"\"\"\n def __init__(self, modprobe):\n self.data = {}\n self.bad_lines = []\n for mod in modprobe:\n filename = mod.file_path # relative path inside archive\n # Copy data section\n for section, sectdict in mod.data.items():\n if section not in self.data:\n self.data[section] = {}\n for name, value in 
sectdict.items():\n if name in self.data[section]:\n # append to this module's value - should only\n # happen for aliases.\n self.data[section][name][0].append(value)\n else:\n # create new tuple\n self.data[section][name] = ModProbeValue(value=value, source=filename)\n # Copy bad lines, if any\n if mod.bad_lines:\n self.bad_lines.extend(\n [ModProbeValue(value=line, source=filename) for line in mod.bad_lines]\n )\n super(AllModProbe, self).__init__()\n", "path": "insights/combiners/modprobe.py"}]} | 1,583 | 147 |
gh_patches_debug_33260 | rasdani/github-patches | git_diff | apache__airflow-1056 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnicodeDecodeError in bash_operator.py
Hi,
I see a lot of these errors when running `airflow backfill`:
```
Traceback (most recent call last):
File "/usr/lib/python2.7/logging/__init__.py", line 851, in emit
msg = self.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 724, in format
return fmt.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 467, in format
s = self._fmt % record.__dict__
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 13: ordinal not in range(128)
Logged from file bash_operator.py, line 72
```
</issue>
<code>
[start of airflow/operators/bash_operator.py]
1
2 from builtins import bytes
3 import logging
4 import sys
5 from subprocess import Popen, STDOUT, PIPE
6 from tempfile import gettempdir, NamedTemporaryFile
7
8 from airflow.utils import AirflowException
9 from airflow.models import BaseOperator
10 from airflow.utils import apply_defaults, TemporaryDirectory
11
12
13 class BashOperator(BaseOperator):
14 """
15 Execute a Bash script, command or set of commands.
16
17 :param bash_command: The command, set of commands or reference to a
18 bash script (must be '.sh') to be executed.
19 :type bash_command: string
20 :param env: If env is not None, it must be a mapping that defines the
21 environment variables for the new process; these are used instead
22 of inheriting the current process environment, which is the default
23 behavior.
24 :type env: dict
25 """
26 template_fields = ('bash_command', 'env')
27 template_ext = ('.sh', '.bash',)
28 ui_color = '#f0ede4'
29
30 @apply_defaults
31 def __init__(
32 self,
33 bash_command,
34 xcom_push=False,
35 env=None,
36 *args, **kwargs):
37 """
38 If xcom_push is True, the last line written to stdout will also
39 be pushed to an XCom when the bash command completes.
40 """
41 super(BashOperator, self).__init__(*args, **kwargs)
42 self.bash_command = bash_command
43 self.env = env
44 self.xcom_push_flag = xcom_push
45
46 def execute(self, context):
47 """
48 Execute the bash command in a temporary directory
49 which will be cleaned afterwards
50 """
51 bash_command = self.bash_command
52 logging.info("tmp dir root location: \n" + gettempdir())
53 with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:
54 with NamedTemporaryFile(dir=tmp_dir, prefix=self.task_id) as f:
55
56 f.write(bytes(bash_command, 'utf_8'))
57 f.flush()
58 fname = f.name
59 script_location = tmp_dir + "/" + fname
60 logging.info("Temporary script "
61 "location :{0}".format(script_location))
62 logging.info("Running command: " + bash_command)
63 sp = Popen(
64 ['bash', fname],
65 stdout=PIPE, stderr=STDOUT,
66 cwd=tmp_dir, env=self.env)
67
68 self.sp = sp
69
70 logging.info("Output:")
71 line = ''
72 for line in iter(sp.stdout.readline, b''):
73 line = line.decode().strip()
74 logging.info(line)
75 sp.wait()
76 logging.info("Command exited with "
77 "return code {0}".format(sp.returncode))
78
79 if sp.returncode:
80 raise AirflowException("Bash command failed")
81
82 if self.xcom_push_flag:
83 return line
84
85 def on_kill(self):
86 logging.info('Sending SIGTERM signal to bash subprocess')
87 self.sp.terminate()
88
[end of airflow/operators/bash_operator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/operators/bash_operator.py b/airflow/operators/bash_operator.py
--- a/airflow/operators/bash_operator.py
+++ b/airflow/operators/bash_operator.py
@@ -1,7 +1,6 @@
from builtins import bytes
import logging
-import sys
from subprocess import Popen, STDOUT, PIPE
from tempfile import gettempdir, NamedTemporaryFile
@@ -22,6 +21,7 @@
of inheriting the current process environment, which is the default
behavior.
:type env: dict
+ :type output_encoding: output encoding of bash command
"""
template_fields = ('bash_command', 'env')
template_ext = ('.sh', '.bash',)
@@ -33,6 +33,7 @@
bash_command,
xcom_push=False,
env=None,
+ output_encoding='utf-8',
*args, **kwargs):
"""
If xcom_push is True, the last line written to stdout will also
@@ -42,6 +43,7 @@
self.bash_command = bash_command
self.env = env
self.xcom_push_flag = xcom_push
+ self.output_encoding = output_encoding
def execute(self, context):
"""
@@ -70,7 +72,7 @@
logging.info("Output:")
line = ''
for line in iter(sp.stdout.readline, b''):
- line = line.decode().strip()
+ line = line.decode(self.output_encoding).strip()
logging.info(line)
sp.wait()
logging.info("Command exited with "
| {"golden_diff": "diff --git a/airflow/operators/bash_operator.py b/airflow/operators/bash_operator.py\n--- a/airflow/operators/bash_operator.py\n+++ b/airflow/operators/bash_operator.py\n@@ -1,7 +1,6 @@\n \n from builtins import bytes\n import logging\n-import sys\n from subprocess import Popen, STDOUT, PIPE\n from tempfile import gettempdir, NamedTemporaryFile\n \n@@ -22,6 +21,7 @@\n of inheriting the current process environment, which is the default\n behavior.\n :type env: dict\n+ :type output_encoding: output encoding of bash command\n \"\"\"\n template_fields = ('bash_command', 'env')\n template_ext = ('.sh', '.bash',)\n@@ -33,6 +33,7 @@\n bash_command,\n xcom_push=False,\n env=None,\n+ output_encoding='utf-8',\n *args, **kwargs):\n \"\"\"\n If xcom_push is True, the last line written to stdout will also\n@@ -42,6 +43,7 @@\n self.bash_command = bash_command\n self.env = env\n self.xcom_push_flag = xcom_push\n+ self.output_encoding = output_encoding\n \n def execute(self, context):\n \"\"\"\n@@ -70,7 +72,7 @@\n logging.info(\"Output:\")\n line = ''\n for line in iter(sp.stdout.readline, b''):\n- line = line.decode().strip()\n+ line = line.decode(self.output_encoding).strip()\n logging.info(line)\n sp.wait()\n logging.info(\"Command exited with \"\n", "issue": "UnicodeDecodeError in bash_operator.py\nHi,\n\nI see a lot of these errors when running `airflow backfill` : \n\n```\nTraceback (most recent call last):\n File \"/usr/lib/python2.7/logging/__init__.py\", line 851, in emit\n msg = self.format(record)\n File \"/usr/lib/python2.7/logging/__init__.py\", line 724, in format\n return fmt.format(record)\n File \"/usr/lib/python2.7/logging/__init__.py\", line 467, in format\n s = self._fmt % record.__dict__\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 13: ordinal not in range(128)\nLogged from file bash_operator.py, line 72\n```\n\n", "before_files": [{"content": "\nfrom builtins import bytes\nimport logging\nimport sys\nfrom subprocess import Popen, STDOUT, PIPE\nfrom tempfile import gettempdir, NamedTemporaryFile\n\nfrom airflow.utils import AirflowException\nfrom airflow.models import BaseOperator\nfrom airflow.utils import apply_defaults, TemporaryDirectory\n\n\nclass BashOperator(BaseOperator):\n \"\"\"\n Execute a Bash script, command or set of commands.\n\n :param bash_command: The command, set of commands or reference to a\n bash script (must be '.sh') to be executed.\n :type bash_command: string\n :param env: If env is not None, it must be a mapping that defines the\n environment variables for the new process; these are used instead\n of inheriting the current process environment, which is the default\n behavior.\n :type env: dict\n \"\"\"\n template_fields = ('bash_command', 'env')\n template_ext = ('.sh', '.bash',)\n ui_color = '#f0ede4'\n\n @apply_defaults\n def __init__(\n self,\n bash_command,\n xcom_push=False,\n env=None,\n *args, **kwargs):\n \"\"\"\n If xcom_push is True, the last line written to stdout will also\n be pushed to an XCom when the bash command completes.\n \"\"\"\n super(BashOperator, self).__init__(*args, **kwargs)\n self.bash_command = bash_command\n self.env = env\n self.xcom_push_flag = xcom_push\n\n def execute(self, context):\n \"\"\"\n Execute the bash command in a temporary directory\n which will be cleaned afterwards\n \"\"\"\n bash_command = self.bash_command\n logging.info(\"tmp dir root location: \\n\" + gettempdir())\n with TemporaryDirectory(prefix='airflowtmp') as tmp_dir:\n with NamedTemporaryFile(dir=tmp_dir, 
prefix=self.task_id) as f:\n\n f.write(bytes(bash_command, 'utf_8'))\n f.flush()\n fname = f.name\n script_location = tmp_dir + \"/\" + fname\n logging.info(\"Temporary script \"\n \"location :{0}\".format(script_location))\n logging.info(\"Running command: \" + bash_command)\n sp = Popen(\n ['bash', fname],\n stdout=PIPE, stderr=STDOUT,\n cwd=tmp_dir, env=self.env)\n\n self.sp = sp\n\n logging.info(\"Output:\")\n line = ''\n for line in iter(sp.stdout.readline, b''):\n line = line.decode().strip()\n logging.info(line)\n sp.wait()\n logging.info(\"Command exited with \"\n \"return code {0}\".format(sp.returncode))\n\n if sp.returncode:\n raise AirflowException(\"Bash command failed\")\n\n if self.xcom_push_flag:\n return line\n\n def on_kill(self):\n logging.info('Sending SIGTERM signal to bash subprocess')\n self.sp.terminate()\n", "path": "airflow/operators/bash_operator.py"}]} | 1,506 | 347 |
gh_patches_debug_9103 | rasdani/github-patches | git_diff | opsdroid__opsdroid-30 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Copy message on respond
When a message responds, it updates its `text` value and passes itself to the connector. Because the object is passed by reference in Python, the next rule to parse the message then goes on to parse the response text instead of the original.
The message respond method should create a shallow copy of itself to pass to the connector, instead of updating itself directly.
</issue>
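For illustration, a minimal sketch of the copy-based fix the issue describes, assuming a `Message` class shaped like the one below; the project's actual change may differ in detail:

from copy import copy

class Message:
    def __init__(self, text, user, room, connector):
        self.text = text
        self.user = user
        self.room = room
        self.connector = connector

    def respond(self, text):
        # Work on a shallow copy so the original message keeps its text
        # and later parsing rules are unaffected by the response.
        response = copy(self)
        response.text = text
        self.connector.respond(response)

A shallow copy suffices here because only `text` is rewritten; the remaining attributes can safely be shared with the original message.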
<code>
[start of opsdroid/message.py]
1 """Class to encapsulate a message."""
2
3
4 class Message:
5 # pylint: disable=too-few-public-methods
6 """A message object."""
7
8 def __init__(self, text, user, room, connector):
9 """Create object with minimum properties."""
10 self.text = text
11 self.user = user
12 self.room = room
13 self.connector = connector
14 self.regex = None
15
16 def respond(self, text):
17 """Respond to this message using the connector it was created by."""
18 self.text = text
19 self.connector.respond(self)
20
[end of opsdroid/message.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/message.py b/opsdroid/message.py
--- a/opsdroid/message.py
+++ b/opsdroid/message.py
@@ -1,5 +1,7 @@
"""Class to encapsulate a message."""
+from copy import copy
+
class Message:
# pylint: disable=too-few-public-methods
@@ -15,5 +17,6 @@
def respond(self, text):
"""Respond to this message using the connector it was created by."""
- self.text = text
- self.connector.respond(self)
+ response = copy(self)
+ response.text = text
+ self.connector.respond(response)
| {"golden_diff": "diff --git a/opsdroid/message.py b/opsdroid/message.py\n--- a/opsdroid/message.py\n+++ b/opsdroid/message.py\n@@ -1,5 +1,7 @@\n \"\"\"Class to encapsulate a message.\"\"\"\n \n+from copy import copy\n+\n \n class Message:\n # pylint: disable=too-few-public-methods\n@@ -15,5 +17,6 @@\n \n def respond(self, text):\n \"\"\"Respond to this message using the connector it was created by.\"\"\"\n- self.text = text\n- self.connector.respond(self)\n+ response = copy(self)\n+ response.text = text\n+ self.connector.respond(response)\n", "issue": "Copy message on respond\nWhen a message responds it updates it's `text` value and passes itself to the connector. Due to pointers in Python the next rule to parse the message goes on to parse the response text.\n\nThe message respond method should create a shallow copy of itself to pass to the connector, instead of updating itself directly.\n\n", "before_files": [{"content": "\"\"\"Class to encapsulate a message.\"\"\"\n\n\nclass Message:\n # pylint: disable=too-few-public-methods\n \"\"\"A message object.\"\"\"\n\n def __init__(self, text, user, room, connector):\n \"\"\"Create object with minimum properties.\"\"\"\n self.text = text\n self.user = user\n self.room = room\n self.connector = connector\n self.regex = None\n\n def respond(self, text):\n \"\"\"Respond to this message using the connector it was created by.\"\"\"\n self.text = text\n self.connector.respond(self)\n", "path": "opsdroid/message.py"}]} | 755 | 149 |
gh_patches_debug_25188 | rasdani/github-patches | git_diff | helmholtz-analytics__heat-115 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add unit tests for stride_tricks/broadcast_shape
</issue>
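A sketch of the kind of cases such unit tests would likely cover, based on the behaviour documented in `broadcast_shape` below; the module path and plain-`assert` style are assumptions about the project's test layout:

from heat.core.stride_tricks import broadcast_shape

def test_broadcast_shape():
    # trailing dimensions of size 1 (or missing dimensions) broadcast up
    assert broadcast_shape((5, 4), (4,)) == (5, 4)
    assert broadcast_shape((1, 100, 1), (10, 1, 5)) == (10, 100, 5)
    assert broadcast_shape((8, 1, 6, 1), (7, 1, 5)) == (8, 7, 6, 5)
    # incompatible shapes must raise
    try:
        broadcast_shape((2, 1), (8, 4, 3))
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError for incompatible shapes")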
<code>
[start of heat/core/stride_tricks.py]
1 import itertools
2
3
4 def broadcast_shape(shape_a, shape_b):
5 """
6 Infers, if possible, the broadcast output shape of two operands a and b. Inspired by stackoverflow post:
7 https://stackoverflow.com/questions/24743753/test-if-an-array-is-broadcastable-to-a-shape
8
9 Parameters
10 ----------
11 shape_a : tuple of ints
12 shape of operand a
13 shape_b : tuple of ints
14 shape of operand b
15
16 Returns
17 -------
18 broadcast_shape : tuple of ints
19 the broadcast shape
20
21 Raises
22 -------
23 ValueError
24 If the two shapes cannot be broadcast.
25 """
26 #TODO: test me
27 it = itertools.zip_longest(shape_a[::-1], shape_b[::-1], fillvalue=1)
28 resulting_shape = max(len(shape_a), len(shape_b)) * [None]
29 for i, (a, b) in enumerate(it):
30 if a == 1 or b == 1 or a == b:
31 resulting_shape[i] = max(a, b)
32 else:
33 raise ValueError('operands could not be broadcast, input shapes {} {}'.format(shape_a, shape_b))
34
35 return tuple(resulting_shape[::-1])
36
37
38 def sanitize_axis(shape, axis):
39 """
40 Checks conformity of an axis with respect to a given shape. The axis will be converted to its positive equivalent
41 and is checked to be within bounds
42
43 Parameters
44 ----------
45 shape : tuple of ints
46 shape of an array
47 axis : ints
48 the axis to be sanitized
49
50 Returns
51 -------
52 sane_axis : int
53 the sane axis
54
55 Raises
56 -------
57 ValueError
58 if the axis cannot be sanitized, i.e. out of bounds.
59 TypeError
60 if the the axis is not integral.
61 """
62 #TODO: test me
63
64 if axis is not None:
65 if isinstance(axis, tuple):
66 raise NotImplementedError('Not implemented for axis: tuple of ints')
67 if not isinstance(axis, int):
68 raise TypeError('axis must be None or int, but was {}'.format(type(axis)))
69
70 if axis is None or 0 <= axis < len(shape):
71 return axis
72 elif axis < 0:
73 axis += len(shape)
74
75 if axis < 0 or axis >= len(shape):
76 raise ValueError('axis axis {} is out of bounds for shape {}'.format(axis, shape))
77
78 return axis
79
80
81 def sanitize_shape(shape):
82 """
83 Verifies and normalizes the given shape.
84
85 Parameters
86 ----------
87 shape : int or sequence of ints
88 Shape of an array.
89
90 Returns
91 -------
92 sane_shape : tuple of ints
93 The sanitized shape.
94
95 Raises
96 -------
97 ValueError
98 If the shape contains illegal values, e.g. negative numbers.
99 TypeError
100 If the given shape is neither and int or a sequence of ints.
101
102 Examples
103 --------
104 >>> sanitize_shape(3)
105 (3,)
106
107 >>> sanitize_shape([1, 2, 3])
108 (1, 2, 3,)
109
110 >>> sanitize_shape(1.0)
111 TypeError
112 """
113 shape = (shape,) if not hasattr(shape, '__iter__') else tuple(shape)
114
115 for dimension in shape:
116 if not isinstance(dimension, int):
117 raise TypeError('expected sequence object with length >= 0 or a single integer')
118 if dimension <= 0:
119 raise ValueError('negative dimensions are not allowed')
120
121 return shape
122
[end of heat/core/stride_tricks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/heat/core/stride_tricks.py b/heat/core/stride_tricks.py
--- a/heat/core/stride_tricks.py
+++ b/heat/core/stride_tricks.py
@@ -22,8 +22,22 @@
-------
ValueError
If the two shapes cannot be broadcast.
+
+ Examples
+ -------
+ >>> broadcast_shape((5,4),(4,))
+ (5,4)
+
+ >>> broadcast_shape((1,100,1),(10,1,5))
+ (10,100,5)
+
+ >>> broadcast_shape((8,1,6,1),(7,1,5,))
+ (8,7,6,5))
+
+ >>> broadcast_shape((2,1),(8,4,3))
+ ValueError
"""
- #TODO: test me
+
it = itertools.zip_longest(shape_a[::-1], shape_b[::-1], fillvalue=1)
resulting_shape = max(len(shape_a), len(shape_b)) * [None]
for i, (a, b) in enumerate(it):
@@ -58,9 +72,23 @@
if the axis cannot be sanitized, i.e. out of bounds.
TypeError
if the the axis is not integral.
+
+ Examples
+ -------
+ >>> sanitize_axis((5,4,4),1)
+ 1
+
+ >>> sanitize_axis((5,4,4),-1)
+ 2
+
+ >>> sanitize_axis((5, 4), (1,))
+ NotImplementedError
+
+ >>> sanitize_axis((5, 4), 1.0)
+ TypeError
+
"""
- #TODO: test me
-
+
if axis is not None:
if isinstance(axis, tuple):
raise NotImplementedError('Not implemented for axis: tuple of ints')
| {"golden_diff": "diff --git a/heat/core/stride_tricks.py b/heat/core/stride_tricks.py\n--- a/heat/core/stride_tricks.py\n+++ b/heat/core/stride_tricks.py\n@@ -22,8 +22,22 @@\n -------\n ValueError\n If the two shapes cannot be broadcast.\n+\n+ Examples\n+ -------\n+ >>> broadcast_shape((5,4),(4,))\n+ (5,4)\n+\n+ >>> broadcast_shape((1,100,1),(10,1,5))\n+ (10,100,5)\n+\n+ >>> broadcast_shape((8,1,6,1),(7,1,5,))\n+ (8,7,6,5))\n+\n+ >>> broadcast_shape((2,1),(8,4,3))\n+ ValueError\n \"\"\"\n- #TODO: test me\n+\n it = itertools.zip_longest(shape_a[::-1], shape_b[::-1], fillvalue=1)\n resulting_shape = max(len(shape_a), len(shape_b)) * [None]\n for i, (a, b) in enumerate(it):\n@@ -58,9 +72,23 @@\n if the axis cannot be sanitized, i.e. out of bounds.\n TypeError\n if the the axis is not integral.\n+\n+ Examples\n+ -------\n+ >>> sanitize_axis((5,4,4),1)\n+ 1\n+\n+ >>> sanitize_axis((5,4,4),-1)\n+ 2\n+\n+ >>> sanitize_axis((5, 4), (1,))\n+ NotImplementedError\n+\n+ >>> sanitize_axis((5, 4), 1.0)\n+ TypeError\n+\n \"\"\"\n- #TODO: test me\n- \n+\n if axis is not None:\n if isinstance(axis, tuple):\n raise NotImplementedError('Not implemented for axis: tuple of ints')\n", "issue": "Add unit tests for stride_tricks/broadcast_shape\n\n", "before_files": [{"content": "import itertools\n\n\ndef broadcast_shape(shape_a, shape_b):\n \"\"\"\n Infers, if possible, the broadcast output shape of two operands a and b. Inspired by stackoverflow post:\n https://stackoverflow.com/questions/24743753/test-if-an-array-is-broadcastable-to-a-shape\n\n Parameters\n ----------\n shape_a : tuple of ints\n shape of operand a\n shape_b : tuple of ints\n shape of operand b\n\n Returns\n -------\n broadcast_shape : tuple of ints\n the broadcast shape\n\n Raises\n -------\n ValueError\n If the two shapes cannot be broadcast.\n \"\"\"\n #TODO: test me\n it = itertools.zip_longest(shape_a[::-1], shape_b[::-1], fillvalue=1)\n resulting_shape = max(len(shape_a), len(shape_b)) * [None]\n for i, (a, b) in enumerate(it):\n if a == 1 or b == 1 or a == b:\n resulting_shape[i] = max(a, b)\n else:\n raise ValueError('operands could not be broadcast, input shapes {} {}'.format(shape_a, shape_b))\n\n return tuple(resulting_shape[::-1])\n\n\ndef sanitize_axis(shape, axis):\n \"\"\"\n Checks conformity of an axis with respect to a given shape. The axis will be converted to its positive equivalent\n and is checked to be within bounds\n\n Parameters\n ----------\n shape : tuple of ints\n shape of an array\n axis : ints\n the axis to be sanitized\n\n Returns\n -------\n sane_axis : int\n the sane axis\n\n Raises\n -------\n ValueError\n if the axis cannot be sanitized, i.e. out of bounds.\n TypeError\n if the the axis is not integral.\n \"\"\"\n #TODO: test me\n \n if axis is not None:\n if isinstance(axis, tuple):\n raise NotImplementedError('Not implemented for axis: tuple of ints')\n if not isinstance(axis, int):\n raise TypeError('axis must be None or int, but was {}'.format(type(axis)))\n\n if axis is None or 0 <= axis < len(shape):\n return axis\n elif axis < 0:\n axis += len(shape)\n\n if axis < 0 or axis >= len(shape):\n raise ValueError('axis axis {} is out of bounds for shape {}'.format(axis, shape))\n\n return axis\n\n\ndef sanitize_shape(shape):\n \"\"\"\n Verifies and normalizes the given shape.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of an array.\n\n Returns\n -------\n sane_shape : tuple of ints\n The sanitized shape.\n\n Raises\n -------\n ValueError\n If the shape contains illegal values, e.g. 
negative numbers.\n TypeError\n If the given shape is neither and int or a sequence of ints.\n\n Examples\n --------\n >>> sanitize_shape(3)\n (3,)\n\n >>> sanitize_shape([1, 2, 3])\n (1, 2, 3,)\n\n >>> sanitize_shape(1.0)\n TypeError\n \"\"\"\n shape = (shape,) if not hasattr(shape, '__iter__') else tuple(shape)\n\n for dimension in shape:\n if not isinstance(dimension, int):\n raise TypeError('expected sequence object with length >= 0 or a single integer')\n if dimension <= 0:\n raise ValueError('negative dimensions are not allowed')\n\n return shape\n", "path": "heat/core/stride_tricks.py"}]} | 1,559 | 418 |
gh_patches_debug_6425 | rasdani/github-patches | git_diff | helmholtz-analytics__heat-736 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Heat software development status "Beta"
**Related**
--
**Feature functionality**
The software development status in PyPI is listed as "3 - Alpha". We are currently considering Heat as Beta, so this should be reflected in the status, which I propose to set to "4 - Beta".
**Additional context**
--
</issue>
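For completeness, the change the issue proposes is a one-line edit to the trove classifier in the `setup.py` shown below (sketch; surrounding arguments elided):

setuptools.setup(
    # ... other arguments unchanged ...
    classifiers=[
        "Development Status :: 4 - Beta",  # was "3 - Alpha"
        # ... remaining classifiers unchanged ...
    ],
)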
<code>
[start of setup.py]
1 from setuptools import setup, find_packages
2 import codecs
3
4
5 with codecs.open("README.md", "r", "utf-8") as handle:
6 long_description = handle.read()
7
8 __version__ = None # appeases flake, assignment in exec() below
9 with open("./heat/core/version.py") as handle:
10 exec(handle.read())
11
12 setup(
13 name="heat",
14 packages=find_packages(exclude=("*tests*", "*benchmarks*")),
15 data_files=["README.md", "LICENSE"],
16 version=__version__,
17 description="A framework for high-performance data analytics and machine learning.",
18 long_description=long_description,
19 long_description_content_type="text/markdown",
20 author="Helmholtz Association",
21 author_email="[email protected]",
22 url="https://github.com/helmholtz-analytics/heat",
23 keywords=["data", "analytics", "tensors", "distributed", "gpu"],
24 python_requires="~=3.6",
25 classifiers=[
26 "Development Status :: 3 - Alpha",
27 "Programming Language :: Python :: 3.6",
28 "Programming Language :: Python :: 3.7",
29 "Programming Language :: Python :: 3.8",
30 "License :: OSI Approved :: MIT License",
31 "Intended Audience :: Science/Research",
32 "Topic :: Scientific/Engineering",
33 ],
34 install_requires=[
35 "mpi4py>=3.0.0",
36 "numpy>=1.13.0",
37 "torch>=1.7.0",
38 "scipy>=0.14.0",
39 "pillow>=6.0.0",
40 "torchvision>=0.5.0",
41 ],
42 extras_require={
43 "hdf5": ["h5py>=2.8.0"],
44 "netcdf": ["netCDF4>=1.4.0"],
45 "dev": ["pre-commit>=1.18.3"],
46 },
47 )
48
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@
keywords=["data", "analytics", "tensors", "distributed", "gpu"],
python_requires="~=3.6",
classifiers=[
- "Development Status :: 3 - Alpha",
+ "Development Status :: 4 - Beta",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,7 +23,7 @@\n keywords=[\"data\", \"analytics\", \"tensors\", \"distributed\", \"gpu\"],\n python_requires=\"~=3.6\",\n classifiers=[\n- \"Development Status :: 3 - Alpha\",\n+ \"Development Status :: 4 - Beta\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n", "issue": "Heat software development status \"Beta\"\n**Related**\r\n--\r\n\r\n**Feature functionality**\r\nThe software development status in PyPI is listed as \"3 - Alpha\". We are currently considering Heat as Beta, so this should be reflected in the status, which I propose to set to \"4 - Beta\".\r\n\r\n**Additional context**\r\n--\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport codecs\n\n\nwith codecs.open(\"README.md\", \"r\", \"utf-8\") as handle:\n long_description = handle.read()\n\n__version__ = None # appeases flake, assignment in exec() below\nwith open(\"./heat/core/version.py\") as handle:\n exec(handle.read())\n\nsetup(\n name=\"heat\",\n packages=find_packages(exclude=(\"*tests*\", \"*benchmarks*\")),\n data_files=[\"README.md\", \"LICENSE\"],\n version=__version__,\n description=\"A framework for high-performance data analytics and machine learning.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Helmholtz Association\",\n author_email=\"[email protected]\",\n url=\"https://github.com/helmholtz-analytics/heat\",\n keywords=[\"data\", \"analytics\", \"tensors\", \"distributed\", \"gpu\"],\n python_requires=\"~=3.6\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: MIT License\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n ],\n install_requires=[\n \"mpi4py>=3.0.0\",\n \"numpy>=1.13.0\",\n \"torch>=1.7.0\",\n \"scipy>=0.14.0\",\n \"pillow>=6.0.0\",\n \"torchvision>=0.5.0\",\n ],\n extras_require={\n \"hdf5\": [\"h5py>=2.8.0\"],\n \"netcdf\": [\"netCDF4>=1.4.0\"],\n \"dev\": [\"pre-commit>=1.18.3\"],\n },\n)\n", "path": "setup.py"}]} | 1,100 | 121 |
gh_patches_debug_467 | rasdani/github-patches | git_diff | ocadotechnology__codeforlife-portal-442 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
A new local run fails because of the latest Pillow version
Pillow needs to be pinned to 2.9
</issue>
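The fix the issue asks for is an exact pin in the `install_requires` list of the `setup.py` below (sketch; surrounding requirements elided):

install_requires=[
    # ...
    'Pillow==2.9.0',  # was 'Pillow>=2.9.0', which pulls in a breaking newer release
    # ...
],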
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 from setuptools import find_packages, setup
3 import versioneer
4
5 setup(name='codeforlife-portal',
6 cmdclass=versioneer.get_cmdclass(),
7 version=versioneer.get_version(),
8 packages=find_packages(),
9 include_package_data=True,
10 install_requires=[
11 'django==1.8.2',
12 'django-appconf==1.0.1',
13 'django-countries==3.4.1',
14 'djangorestframework==3.1.3',
15 'django-jquery==1.9.1',
16 'django-autoconfig==0.3.6',
17 'django-pipeline==1.5.4',
18
19 'pyyaml==3.10',
20 'rapid-router >= 1.0.0.post.dev1',
21 'six==1.9.0',
22 'docutils==0.12',
23 'django-recaptcha-field==1.0b2',
24 'reportlab==3.2.0',
25 'postcodes==0.1',
26 'django-formtools==1.0',
27 'django-two-factor-auth==1.2.0',
28 'urllib3==1.10.4',
29 'requests==2.7.0',
30
31 'django-cms==3.1.2',
32
33 'django-classy-tags==0.6.1',
34 'django-treebeard==3.0',
35 'django-sekizai==0.8.2',
36 'djangocms-admin-style==0.2.8',
37
38 'djangocms-text-ckeditor==2.6.0',
39 'djangocms-link==1.6.2',
40 'djangocms-snippet==1.5',
41 'djangocms-style==1.5',
42 'djangocms-column==1.5',
43 'djangocms-grid==1.2',
44 'djangocms-oembed==0.5',
45 'djangocms-table==1.2',
46 'djangocms-file==0.1',
47 'djangocms_flash==0.2.0',
48 'djangocms_googlemap==0.3',
49 'djangocms_inherit==0.1',
50 'djangocms_picture==0.1',
51 'djangocms_teaser==0.1',
52 'djangocms_video==0.1',
53 'django-online-status==0.1.0',
54
55
56 'Pillow>=2.9.0',
57 'django-reversion==1.9.3',
58 'sqlparse',
59 'libsass',
60 ],
61 tests_require=[
62 'django-setuptest',
63 'django-selenium-clean==0.2.1',
64 'responses==0.4.0',
65 'selenium==2.48.0',
66 ],
67 test_suite='setuptest.setuptest.SetupTestSuite',
68 zip_safe=False,
69 )
70
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -53,7 +53,7 @@
'django-online-status==0.1.0',
- 'Pillow>=2.9.0',
+ 'Pillow==2.9.0',
'django-reversion==1.9.3',
'sqlparse',
'libsass',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -53,7 +53,7 @@\n 'django-online-status==0.1.0',\n \n \n- 'Pillow>=2.9.0',\n+ 'Pillow==2.9.0',\n 'django-reversion==1.9.3',\n 'sqlparse',\n 'libsass',\n", "issue": "New run on local fails because of latest pillow version\nNeeds to be set to 2.9\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom setuptools import find_packages, setup\nimport versioneer\n\nsetup(name='codeforlife-portal',\n cmdclass=versioneer.get_cmdclass(),\n version=versioneer.get_version(),\n packages=find_packages(),\n include_package_data=True,\n install_requires=[\n 'django==1.8.2',\n 'django-appconf==1.0.1',\n 'django-countries==3.4.1',\n 'djangorestframework==3.1.3',\n 'django-jquery==1.9.1',\n 'django-autoconfig==0.3.6',\n 'django-pipeline==1.5.4',\n\n 'pyyaml==3.10',\n 'rapid-router >= 1.0.0.post.dev1',\n 'six==1.9.0',\n 'docutils==0.12',\n 'django-recaptcha-field==1.0b2',\n 'reportlab==3.2.0',\n 'postcodes==0.1',\n 'django-formtools==1.0',\n 'django-two-factor-auth==1.2.0',\n 'urllib3==1.10.4',\n 'requests==2.7.0',\n\n 'django-cms==3.1.2',\n\n 'django-classy-tags==0.6.1',\n 'django-treebeard==3.0',\n 'django-sekizai==0.8.2',\n 'djangocms-admin-style==0.2.8',\n\n 'djangocms-text-ckeditor==2.6.0',\n 'djangocms-link==1.6.2',\n 'djangocms-snippet==1.5',\n 'djangocms-style==1.5',\n 'djangocms-column==1.5',\n 'djangocms-grid==1.2',\n 'djangocms-oembed==0.5',\n 'djangocms-table==1.2',\n 'djangocms-file==0.1',\n 'djangocms_flash==0.2.0',\n 'djangocms_googlemap==0.3',\n 'djangocms_inherit==0.1',\n 'djangocms_picture==0.1',\n 'djangocms_teaser==0.1',\n 'djangocms_video==0.1',\n 'django-online-status==0.1.0',\n\n\n 'Pillow>=2.9.0',\n 'django-reversion==1.9.3',\n 'sqlparse',\n 'libsass',\n ],\n tests_require=[\n 'django-setuptest',\n 'django-selenium-clean==0.2.1',\n 'responses==0.4.0',\n 'selenium==2.48.0',\n ],\n test_suite='setuptest.setuptest.SetupTestSuite',\n zip_safe=False,\n )\n", "path": "setup.py"}]} | 1,332 | 92 |
gh_patches_debug_24362 | rasdani/github-patches | git_diff | liqd__a4-opin-496 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Markdown messes with Gender Mainstreaming
When writing “Initiator*innen […] Entscheidungsträger*innen” in a comment, the text between the `*` characters is set in italics, presumably because of the markdown formatting. Is there anything we can do about that? If I remember correctly, some markdown variants only allow underscores for emphasis and a double “*” for bold. Should we maybe use such a variant?
Example here: https://opin-stage.liqd.net/de/projects/opin-alleinstellungsmerkmale-fur-produktseite/
Markdown in comments is inconsistent with the rest
We decided against Markdown in most of the other input fields and used CKEditor instead, but comments still use markdown. This is not documented anywhere, so these are our options:
1. support markdown in comments, but also advertise it to the user
2. support only new lines and no further formatting in comments (like _italic_, **bold**, ~~strike~~)
3. add ckeditor to comment edit field and allow some basic html in comments
</issue>
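As a sketch of what option 2 could look like at render time, using only Django utilities the project already depends on: escape the raw comment and keep just the line breaks. The function name and where it would be hooked in (template filter, serializer, ...) are assumptions:

from django.utils.html import escape, linebreaks

def render_comment_plain(raw):
    # Escape markup-significant characters, then turn newlines into
    # <p>/<br> tags: no italics, bold or strike-through, so
    # "Entscheidungsträger*innen" is displayed literally.
    return linebreaks(escape(raw))

The project's actual diff below takes the write-time route instead, sanitising the text in `Comment.save` before the comment is stored.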
<code>
[start of euth/comments/templatetags/react_comments.py]
1 import json
2
3 from django import template, utils
4 from django.contrib.contenttypes.models import ContentType
5 from django.utils.safestring import mark_safe
6
7
8 from ..models import Comment
9 from ..serializers import ThreadSerializer
10
11 register = template.Library()
12
13
14 @register.simple_tag(takes_context=True)
15 def react_comments(context, obj):
16 request = context['request']
17
18 serializer = ThreadSerializer(
19 obj.comments.all(), many=True, context={'request': request})
20 comments = serializer.data
21
22 user = request.user
23 is_authenticated = user.is_authenticated()
24 is_moderator = user.is_superuser or user in obj.project.moderators.all()
25 user_name = user.username
26
27 contenttype = ContentType.objects.get_for_model(obj)
28 permission = '{ct.app_label}.comment_{ct.model}'.format(ct=contenttype)
29 has_comment_permission = user.has_perm(permission, obj)
30
31 comments_contenttype = ContentType.objects.get_for_model(Comment)
32 pk = obj.pk
33
34 language = utils.translation.get_language()
35
36 mountpoint = 'comments_for_{contenttype}_{pk}'.format(
37 contenttype=contenttype.pk,
38 pk=pk
39 )
40 attributes = {
41 'comments': comments,
42 'comments_contenttype': comments_contenttype.pk,
43 'subjectType': contenttype.pk,
44 'subjectId': pk,
45 'isAuthenticated': is_authenticated,
46 'isModerator': is_moderator,
47 'user_name': user_name,
48 'language': language,
49 'isReadOnly': not has_comment_permission,
50 }
51
52 return mark_safe((
53 '<div id={mountpoint}></div><script>window.opin.renderComment('
54 '{mountpoint}, {attributes})</script>').format(
55 attributes=json.dumps(attributes),
56 mountpoint=json.dumps(mountpoint)
57 )
58 )
59
[end of euth/comments/templatetags/react_comments.py]
[start of euth/comments/models.py]
1 from django.conf import settings
2 from django.contrib.contenttypes.fields import (GenericForeignKey,
3 GenericRelation)
4 from django.contrib.contenttypes.models import ContentType
5 from django.db import models
6 from django.utils.translation import ugettext_lazy as _
7
8 from euth.contrib.base_models import UserGeneratedContentModel
9 from euth.contrib.generics import models_to_limit
10 from euth.ratings import models as rating_models
11
12
13 class Comment(UserGeneratedContentModel):
14
15 content_type = models.ForeignKey(
16 ContentType,
17 on_delete=models.CASCADE,
18 limit_choices_to=models_to_limit(settings.COMMENTABLES)
19 )
20 object_pk = models.PositiveIntegerField()
21 content_object = GenericForeignKey(
22 ct_field="content_type", fk_field="object_pk")
23 comment = models.TextField(max_length=1024)
24 is_removed = models.BooleanField(default=False)
25 is_censored = models.BooleanField(default=False)
26 ratings = GenericRelation(rating_models.Rating,
27 related_query_name='comment',
28 object_id_field='object_pk')
29 child_comments = GenericRelation('self',
30 related_query_name='parent_comment',
31 object_id_field='object_pk')
32
33 class Meta:
34 verbose_name = _("Comment")
35 verbose_name_plural = _("Comments")
36 ordering = ('created',)
37
38 def __str__(self):
39 if len(self.comment) > 50:
40 return "comment: {} ...".format(self.comment[:50])
41 else:
42 return "comment: {}".format(self.comment)
43
44 def save(self, *args, **kwargs):
45 """
46 Change the text of the comment if
47 the comment was marked removed or censored
48 """
49
50 if self.is_removed:
51 self.comment = 'deleted by creator'
52 if self.is_censored:
53 self.comment = 'deleted by moderator'
54 return super(Comment, self).save(*args, **kwargs)
55
56 def get_absolute_url(self):
57 if hasattr(self.content_object, 'get_absolute_url'):
58 return self.content_object.get_absolute_url()
59 elif hasattr(self.project, 'get_absolute_url'):
60 return self.project.get_absolute_url()
61 else:
62 return None
63
64 @property
65 def notification_content(self):
66 return self.comment
67
68 @property
69 def project(self):
70 co = self.content_object
71 if isinstance(co, self.__class__):
72 co = co.content_object
73 return co.project
74
[end of euth/comments/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/euth/comments/models.py b/euth/comments/models.py
--- a/euth/comments/models.py
+++ b/euth/comments/models.py
@@ -5,6 +5,7 @@
from django.db import models
from django.utils.translation import ugettext_lazy as _
+from contrib.transforms import html_transforms
from euth.contrib.base_models import UserGeneratedContentModel
from euth.contrib.generics import models_to_limit
from euth.ratings import models as rating_models
@@ -47,6 +48,9 @@
the comment was marked removed or censored
"""
+ self.comment = html_transforms.clean_html_all(
+ self.comment)
+
if self.is_removed:
self.comment = 'deleted by creator'
if self.is_censored:
diff --git a/euth/comments/templatetags/react_comments.py b/euth/comments/templatetags/react_comments.py
--- a/euth/comments/templatetags/react_comments.py
+++ b/euth/comments/templatetags/react_comments.py
@@ -16,7 +16,8 @@
request = context['request']
serializer = ThreadSerializer(
- obj.comments.all(), many=True, context={'request': request})
+ obj.comments.all().order_by('-created'),
+ many=True, context={'request': request})
comments = serializer.data
user = request.user
| {"golden_diff": "diff --git a/euth/comments/models.py b/euth/comments/models.py\n--- a/euth/comments/models.py\n+++ b/euth/comments/models.py\n@@ -5,6 +5,7 @@\n from django.db import models\n from django.utils.translation import ugettext_lazy as _\n \n+from contrib.transforms import html_transforms\n from euth.contrib.base_models import UserGeneratedContentModel\n from euth.contrib.generics import models_to_limit\n from euth.ratings import models as rating_models\n@@ -47,6 +48,9 @@\n the comment was marked removed or censored\n \"\"\"\n \n+ self.comment = html_transforms.clean_html_all(\n+ self.comment)\n+\n if self.is_removed:\n self.comment = 'deleted by creator'\n if self.is_censored:\ndiff --git a/euth/comments/templatetags/react_comments.py b/euth/comments/templatetags/react_comments.py\n--- a/euth/comments/templatetags/react_comments.py\n+++ b/euth/comments/templatetags/react_comments.py\n@@ -16,7 +16,8 @@\n request = context['request']\n \n serializer = ThreadSerializer(\n- obj.comments.all(), many=True, context={'request': request})\n+ obj.comments.all().order_by('-created'),\n+ many=True, context={'request': request})\n comments = serializer.data\n \n user = request.user\n", "issue": "Markdown messes with Gender Mainstreaming\nWhen writing \u201cInitiator*innen [\u2026] Entscheidungstr\u00e4ger*innen\u201d in a comment, the text between the `*` is set in italics, because of the markdown formatting, I assume. Is there anything we can do about that? If I remember it correctly, some version of markdown only allows underscores for emphasis and double \u201c*\u201d for setting something in bold. Should we maybe use that version?\r\n\r\n\r\n\r\nExample here: https://opin-stage.liqd.net/de/projects/opin-alleinstellungsmerkmale-fur-produktseite/\nMarkdown in comments in consistent with rest\nWe decided against Markdown in most of the other input fields and used CKEditor instead, but comments still use markdown. But this is not document anywhere. So these are our options:\n1. support markdown in comments, but also advertise it to the user\n2. support only new lines and nor further formatting in comments (like _italic_, **bold**, ~~strike~~)\n3. 
add ckeditor to comment edit field and allow some basic html in comments\n\n", "before_files": [{"content": "import json\n\nfrom django import template, utils\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.safestring import mark_safe\n\n\nfrom ..models import Comment\nfrom ..serializers import ThreadSerializer\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=True)\ndef react_comments(context, obj):\n request = context['request']\n\n serializer = ThreadSerializer(\n obj.comments.all(), many=True, context={'request': request})\n comments = serializer.data\n\n user = request.user\n is_authenticated = user.is_authenticated()\n is_moderator = user.is_superuser or user in obj.project.moderators.all()\n user_name = user.username\n\n contenttype = ContentType.objects.get_for_model(obj)\n permission = '{ct.app_label}.comment_{ct.model}'.format(ct=contenttype)\n has_comment_permission = user.has_perm(permission, obj)\n\n comments_contenttype = ContentType.objects.get_for_model(Comment)\n pk = obj.pk\n\n language = utils.translation.get_language()\n\n mountpoint = 'comments_for_{contenttype}_{pk}'.format(\n contenttype=contenttype.pk,\n pk=pk\n )\n attributes = {\n 'comments': comments,\n 'comments_contenttype': comments_contenttype.pk,\n 'subjectType': contenttype.pk,\n 'subjectId': pk,\n 'isAuthenticated': is_authenticated,\n 'isModerator': is_moderator,\n 'user_name': user_name,\n 'language': language,\n 'isReadOnly': not has_comment_permission,\n }\n\n return mark_safe((\n '<div id={mountpoint}></div><script>window.opin.renderComment('\n '{mountpoint}, {attributes})</script>').format(\n attributes=json.dumps(attributes),\n mountpoint=json.dumps(mountpoint)\n )\n )\n", "path": "euth/comments/templatetags/react_comments.py"}, {"content": "from django.conf import settings\nfrom django.contrib.contenttypes.fields import (GenericForeignKey,\n GenericRelation)\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom euth.contrib.base_models import UserGeneratedContentModel\nfrom euth.contrib.generics import models_to_limit\nfrom euth.ratings import models as rating_models\n\n\nclass Comment(UserGeneratedContentModel):\n\n content_type = models.ForeignKey(\n ContentType,\n on_delete=models.CASCADE,\n limit_choices_to=models_to_limit(settings.COMMENTABLES)\n )\n object_pk = models.PositiveIntegerField()\n content_object = GenericForeignKey(\n ct_field=\"content_type\", fk_field=\"object_pk\")\n comment = models.TextField(max_length=1024)\n is_removed = models.BooleanField(default=False)\n is_censored = models.BooleanField(default=False)\n ratings = GenericRelation(rating_models.Rating,\n related_query_name='comment',\n object_id_field='object_pk')\n child_comments = GenericRelation('self',\n related_query_name='parent_comment',\n object_id_field='object_pk')\n\n class Meta:\n verbose_name = _(\"Comment\")\n verbose_name_plural = _(\"Comments\")\n ordering = ('created',)\n\n def __str__(self):\n if len(self.comment) > 50:\n return \"comment: {} ...\".format(self.comment[:50])\n else:\n return \"comment: {}\".format(self.comment)\n\n def save(self, *args, **kwargs):\n \"\"\"\n Change the text of the comment if\n the comment was marked removed or censored\n \"\"\"\n\n if self.is_removed:\n self.comment = 'deleted by creator'\n if self.is_censored:\n self.comment = 'deleted by moderator'\n return super(Comment, self).save(*args, **kwargs)\n\n def 
get_absolute_url(self):\n if hasattr(self.content_object, 'get_absolute_url'):\n return self.content_object.get_absolute_url()\n elif hasattr(self.project, 'get_absolute_url'):\n return self.project.get_absolute_url()\n else:\n return None\n\n @property\n def notification_content(self):\n return self.comment\n\n @property\n def project(self):\n co = self.content_object\n if isinstance(co, self.__class__):\n co = co.content_object\n return co.project\n", "path": "euth/comments/models.py"}]} | 2,000 | 296 |
gh_patches_debug_1853 | rasdani/github-patches | git_diff | microsoft__playwright-python-145 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DEBUG outputs won't get forwarded
</issue>
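The driver process below is launched with `stderr=asyncio.subprocess.PIPE`, but nothing ever reads that pipe, so anything the driver logs to stderr (which is presumably where its DEBUG output goes) is silently dropped. A minimal sketch of the forwarding idea: hand the parent's own stderr to the subprocess instead of a pipe.

import asyncio
import sys

async def launch_driver(driver_path):
    # Inheriting sys.stderr lets the driver's debug logging reach the
    # console directly, with no extra reader task needed.
    return await asyncio.create_subprocess_exec(
        driver_path,
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        stderr=sys.stderr,  # was: asyncio.subprocess.PIPE
    )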
<code>
[start of playwright/main.py]
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import asyncio
16 import subprocess
17 import sys
18 from typing import Any
19
20 from greenlet import greenlet
21
22 from playwright.async_api import Playwright as AsyncPlaywright
23 from playwright.connection import Connection
24 from playwright.helper import Error
25 from playwright.object_factory import create_remote_object
26 from playwright.path_utils import get_file_dirname
27 from playwright.playwright import Playwright
28 from playwright.sync_api import Playwright as SyncPlaywright
29 from playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber
30
31
32 def compute_driver_name() -> str:
33 platform = sys.platform
34 if platform == "darwin":
35 result = "driver-macos"
36 elif platform == "linux":
37 result = "driver-linux"
38 elif platform == "win32":
39 result = "driver-win.exe"
40 return result
41
42
43 async def run_driver_async() -> Connection:
44 package_path = get_file_dirname()
45 driver_name = compute_driver_name()
46 driver_executable = package_path / "drivers" / driver_name
47
48 proc = await asyncio.create_subprocess_exec(
49 str(driver_executable),
50 stdin=asyncio.subprocess.PIPE,
51 stdout=asyncio.subprocess.PIPE,
52 stderr=asyncio.subprocess.PIPE,
53 limit=32768,
54 )
55 assert proc.stdout
56 assert proc.stdin
57 connection = Connection(
58 proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()
59 )
60 return connection
61
62
63 def run_driver() -> Connection:
64 loop = asyncio.get_event_loop()
65 if loop.is_running():
66 raise Error("Can only run one Playwright at a time.")
67 return loop.run_until_complete(run_driver_async())
68
69
70 class SyncPlaywrightContextManager:
71 def __init__(self) -> None:
72 self._connection = run_driver()
73 self._playwright: SyncPlaywright
74
75 def __enter__(self) -> SyncPlaywright:
76 g_self = greenlet.getcurrent()
77
78 def callback_wrapper(playwright_impl: Playwright) -> None:
79 self._playwright = SyncPlaywright(playwright_impl)
80 g_self.switch()
81
82 self._connection.call_on_object_with_known_name("Playwright", callback_wrapper)
83 set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))
84 dispatcher_fiber().switch()
85 return self._playwright
86
87 def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
88 self._connection.stop_sync()
89
90
91 class AsyncPlaywrightContextManager:
92 def __init__(self) -> None:
93 self._connection: Connection
94
95 async def __aenter__(self) -> AsyncPlaywright:
96 self._connection = await run_driver_async()
97 self._connection.run_async()
98 return AsyncPlaywright(
99 await self._connection.wait_for_object_with_known_name("Playwright")
100 )
101
102 async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
103 self._connection.stop_async()
104
105
106 if sys.platform == "win32":
107 # Use ProactorEventLoop in 3.7, which is default in 3.8
108 loop = asyncio.ProactorEventLoop()
109 asyncio.set_event_loop(loop)
110
111
112 def main() -> None:
113 if "install" not in sys.argv:
114 print('Run "python -m playwright install" to complete installation')
115 return
116 package_path = get_file_dirname()
117 driver_name = compute_driver_name()
118 driver_executable = package_path / "drivers" / driver_name
119 print("Installing the browsers...")
120 subprocess.check_call(f"{driver_executable} install", shell=True)
121
122 print("Playwright is now ready for use")
123
[end of playwright/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/playwright/main.py b/playwright/main.py
--- a/playwright/main.py
+++ b/playwright/main.py
@@ -49,7 +49,7 @@
str(driver_executable),
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
- stderr=asyncio.subprocess.PIPE,
+ stderr=sys.stderr,
limit=32768,
)
assert proc.stdout
| {"golden_diff": "diff --git a/playwright/main.py b/playwright/main.py\n--- a/playwright/main.py\n+++ b/playwright/main.py\n@@ -49,7 +49,7 @@\n str(driver_executable),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n- stderr=asyncio.subprocess.PIPE,\n+ stderr=sys.stderr,\n limit=32768,\n )\n assert proc.stdout\n", "issue": "DEBUG outputs won't get forwarded\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport asyncio\nimport subprocess\nimport sys\nfrom typing import Any\n\nfrom greenlet import greenlet\n\nfrom playwright.async_api import Playwright as AsyncPlaywright\nfrom playwright.connection import Connection\nfrom playwright.helper import Error\nfrom playwright.object_factory import create_remote_object\nfrom playwright.path_utils import get_file_dirname\nfrom playwright.playwright import Playwright\nfrom playwright.sync_api import Playwright as SyncPlaywright\nfrom playwright.sync_base import dispatcher_fiber, set_dispatcher_fiber\n\n\ndef compute_driver_name() -> str:\n platform = sys.platform\n if platform == \"darwin\":\n result = \"driver-macos\"\n elif platform == \"linux\":\n result = \"driver-linux\"\n elif platform == \"win32\":\n result = \"driver-win.exe\"\n return result\n\n\nasync def run_driver_async() -> Connection:\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n\n proc = await asyncio.create_subprocess_exec(\n str(driver_executable),\n stdin=asyncio.subprocess.PIPE,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n limit=32768,\n )\n assert proc.stdout\n assert proc.stdin\n connection = Connection(\n proc.stdout, proc.stdin, create_remote_object, asyncio.get_event_loop()\n )\n return connection\n\n\ndef run_driver() -> Connection:\n loop = asyncio.get_event_loop()\n if loop.is_running():\n raise Error(\"Can only run one Playwright at a time.\")\n return loop.run_until_complete(run_driver_async())\n\n\nclass SyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection = run_driver()\n self._playwright: SyncPlaywright\n\n def __enter__(self) -> SyncPlaywright:\n g_self = greenlet.getcurrent()\n\n def callback_wrapper(playwright_impl: Playwright) -> None:\n self._playwright = SyncPlaywright(playwright_impl)\n g_self.switch()\n\n self._connection.call_on_object_with_known_name(\"Playwright\", callback_wrapper)\n set_dispatcher_fiber(greenlet(lambda: self._connection.run_sync()))\n dispatcher_fiber().switch()\n return self._playwright\n\n def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_sync()\n\n\nclass AsyncPlaywrightContextManager:\n def __init__(self) -> None:\n self._connection: Connection\n\n async def __aenter__(self) -> AsyncPlaywright:\n self._connection = await run_driver_async()\n self._connection.run_async()\n return AsyncPlaywright(\n await 
self._connection.wait_for_object_with_known_name(\"Playwright\")\n )\n\n async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:\n self._connection.stop_async()\n\n\nif sys.platform == \"win32\":\n # Use ProactorEventLoop in 3.7, which is default in 3.8\n loop = asyncio.ProactorEventLoop()\n asyncio.set_event_loop(loop)\n\n\ndef main() -> None:\n if \"install\" not in sys.argv:\n print('Run \"python -m playwright install\" to complete installation')\n return\n package_path = get_file_dirname()\n driver_name = compute_driver_name()\n driver_executable = package_path / \"drivers\" / driver_name\n print(\"Installing the browsers...\")\n subprocess.check_call(f\"{driver_executable} install\", shell=True)\n\n print(\"Playwright is now ready for use\")\n", "path": "playwright/main.py"}]} | 1,716 | 97 |
gh_patches_debug_20086 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-3911 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add rich equality comparison to `WriteAccessAllowed`
The comparison should be based on the `web_app_name` attribute only.
See https://github.com/python-telegram-bot/python-telegram-bot/pull/3898#discussion_r1337582872
</issue>
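In this library, rich equality is conventionally driven by a `_id_attrs` tuple that `TelegramObject.__eq__` compares, so the change amounts to one line in `__init__`. A sketch of the updated constructor (docstring and version notes omitted for brevity):

from typing import Optional

from telegram._telegramobject import TelegramObject
from telegram._utils.types import JSONDict


class WriteAccessAllowed(TelegramObject):
    __slots__ = ("web_app_name",)

    def __init__(
        self, web_app_name: Optional[str] = None, *, api_kwargs: Optional[JSONDict] = None
    ):
        super().__init__(api_kwargs=api_kwargs)
        self.web_app_name: Optional[str] = web_app_name

        # Two instances now compare equal iff their web_app_name matches.
        self._id_attrs = (self.web_app_name,)

        self._freeze()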
<code>
[start of telegram/_writeaccessallowed.py]
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2023
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains objects related to the write access allowed service message."""
20 from typing import Optional
21
22 from telegram._telegramobject import TelegramObject
23 from telegram._utils.types import JSONDict
24
25
26 class WriteAccessAllowed(TelegramObject):
27 """
28 This object represents a service message about a user allowing a bot to write messages after
29 adding the bot to the attachment menu or launching a Web App from a link.
30
31 .. versionadded:: 20.0
32
33 Args:
34 web_app_name (:obj:`str`, optional): Name of the Web App which was launched from a link.
35
36 .. versionadded:: 20.3
37
38 Attributes:
39 web_app_name (:obj:`str`): Optional. Name of the Web App which was launched from a link.
40
41 .. versionadded:: 20.3
42
43 """
44
45 __slots__ = ("web_app_name",)
46
47 def __init__(
48 self, web_app_name: Optional[str] = None, *, api_kwargs: Optional[JSONDict] = None
49 ):
50 super().__init__(api_kwargs=api_kwargs)
51 self.web_app_name: Optional[str] = web_app_name
52
53 self._freeze()
54
[end of telegram/_writeaccessallowed.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/telegram/_writeaccessallowed.py b/telegram/_writeaccessallowed.py
--- a/telegram/_writeaccessallowed.py
+++ b/telegram/_writeaccessallowed.py
@@ -28,7 +28,12 @@
This object represents a service message about a user allowing a bot to write messages after
adding the bot to the attachment menu or launching a Web App from a link.
+ Objects of this class are comparable in terms of equality. Two objects of this class are
+ considered equal, if their :attr:`web_app_name` is equal.
+
.. versionadded:: 20.0
+ .. versionchanged:: NEXT.VERSION
+ Added custom equality comparison for objects of this class.
Args:
web_app_name (:obj:`str`, optional): Name of the Web App which was launched from a link.
@@ -50,4 +55,6 @@
super().__init__(api_kwargs=api_kwargs)
self.web_app_name: Optional[str] = web_app_name
+ self._id_attrs = (self.web_app_name,)
+
self._freeze()
| {"golden_diff": "diff --git a/telegram/_writeaccessallowed.py b/telegram/_writeaccessallowed.py\n--- a/telegram/_writeaccessallowed.py\n+++ b/telegram/_writeaccessallowed.py\n@@ -28,7 +28,12 @@\n This object represents a service message about a user allowing a bot to write messages after\n adding the bot to the attachment menu or launching a Web App from a link.\n \n+ Objects of this class are comparable in terms of equality. Two objects of this class are\n+ considered equal, if their :attr:`web_app_name` is equal.\n+\n .. versionadded:: 20.0\n+ .. versionchanged:: NEXT.VERSION\n+ Added custom equality comparison for objects of this class.\n \n Args:\n web_app_name (:obj:`str`, optional): Name of the Web App which was launched from a link.\n@@ -50,4 +55,6 @@\n super().__init__(api_kwargs=api_kwargs)\n self.web_app_name: Optional[str] = web_app_name\n \n+ self._id_attrs = (self.web_app_name,)\n+\n self._freeze()\n", "issue": "Add rich equality comparison to `WriteAccessAllowed`\nThe comparison should be based on the `web_app_name` attribute only.\r\n\r\nSee https://github.com/python-telegram-bot/python-telegram-bot/pull/3898#discussion_r1337582872\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2023\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains objects related to the write access allowed service message.\"\"\"\nfrom typing import Optional\n\nfrom telegram._telegramobject import TelegramObject\nfrom telegram._utils.types import JSONDict\n\n\nclass WriteAccessAllowed(TelegramObject):\n \"\"\"\n This object represents a service message about a user allowing a bot to write messages after\n adding the bot to the attachment menu or launching a Web App from a link.\n\n .. versionadded:: 20.0\n\n Args:\n web_app_name (:obj:`str`, optional): Name of the Web App which was launched from a link.\n\n .. versionadded:: 20.3\n\n Attributes:\n web_app_name (:obj:`str`): Optional. Name of the Web App which was launched from a link.\n\n .. versionadded:: 20.3\n\n \"\"\"\n\n __slots__ = (\"web_app_name\",)\n\n def __init__(\n self, web_app_name: Optional[str] = None, *, api_kwargs: Optional[JSONDict] = None\n ):\n super().__init__(api_kwargs=api_kwargs)\n self.web_app_name: Optional[str] = web_app_name\n\n self._freeze()\n", "path": "telegram/_writeaccessallowed.py"}]} | 1,149 | 243 |
gh_patches_debug_2450 | rasdani/github-patches | git_diff | MAKENTNU__web-204 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix delete permissions for course registration
</issue>
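The likely culprit is visible in the view code below: `DeleteRegistrationView` requires `make_queue.delete_printer3d_course`, but Django's auto-generated codename for the `Printer3DCourse` model is `delete_printer3dcourse` (compare the `add_printer3dcourse` and `change_printer3dcourse` strings used by the sibling views), so the permission check can never pass. A sketch of the corrected view, assuming no custom permission with the misspelled codename exists:

from django.contrib.auth.mixins import PermissionRequiredMixin
from django.urls import reverse
from django.views.generic import DeleteView

from make_queue.models.course import Printer3DCourse


class DeleteRegistrationView(PermissionRequiredMixin, DeleteView):
    model = Printer3DCourse
    permission_required = (
        "make_queue.delete_printer3dcourse",  # default codename: delete_<lowercased model name>
    )

    def get_success_url(self):
        return reverse("course_panel")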
<code>
[start of make_queue/views/admin/course.py]
1 import io
2
3 import xlsxwriter
4 from django.contrib.auth.mixins import PermissionRequiredMixin
5 from django.db.models import Q
6 from django.http import HttpResponse
7 from django.shortcuts import redirect
8 from django.urls import reverse
9 from django.views.generic import TemplateView, View, CreateView, UpdateView, DeleteView
10
11 from make_queue.forms import Printer3DCourseForm
12 from make_queue.models.course import Printer3DCourse
13
14
15 class CourseView(TemplateView):
16 template_name = "make_queue/course/course_panel.html"
17
18 def get_context_data(self, **kwargs):
19 context_data = super().get_context_data(**kwargs)
20 context_data.update({
21 "registrations": Printer3DCourse.objects.order_by("name"),
22 "possible_statuses": Printer3DCourse.STATUS_CHOICES,
23 })
24 return context_data
25
26
27 class CreateRegistrationView(PermissionRequiredMixin, CreateView):
28 is_next = False
29 model = Printer3DCourse
30 form_class = Printer3DCourseForm
31 template_name = "make_queue/course/registration_create.html"
32 permission_required = (
33 "make_queue.add_printer3dcourse",
34 )
35
36 def get_context_data(self, **kwargs):
37 context_data = super().get_context_data(**kwargs)
38 if self.is_next:
39 context_data["is_next"] = True
40 return context_data
41
42 def get_success_url(self):
43 return reverse("create_course_registration_success")
44
45
46 class EditRegistrationView(PermissionRequiredMixin, UpdateView):
47 model = Printer3DCourse
48 form_class = Printer3DCourseForm
49 template_name = "make_queue/course/registration_edit.html"
50 permission_required = (
51 "make_queue.change_printer3dcourse",
52 )
53
54 def get_success_url(self):
55 return reverse("course_panel")
56
57
58 class DeleteRegistrationView(PermissionRequiredMixin, DeleteView):
59 model = Printer3DCourse
60 permission_required = (
61 "make_queue.delete_printer3d_course",
62 )
63
64 def get_success_url(self):
65 return reverse("course_panel")
66
67
68 class BulkStatusUpdate(View):
69 """
70 Provides a method for bulk updating the status of course registrations
71 """
72
73 def post(self, request):
74 status = request.POST.get("status")
75 registrations = list(map(int, request.POST.getlist("users")))
76 Printer3DCourse.objects.filter(pk__in=registrations).update(status=status)
77
78 return redirect("course_panel")
79
80
81 class CourseXLSXView(View):
82
83 def post(self, request):
84 search_string = request.POST.get("search_text")
85 status_filter = request.POST.get("status_filter")
86
87 course_registrations = Printer3DCourse.objects.filter(
88 Q(username__icontains=search_string) | Q(name__icontains=search_string), status__icontains=status_filter)
89
90 # Use an in-memory output file, to avoid having to clean up the disk
91 output_file = io.BytesIO()
92
93 workbook = xlsxwriter.Workbook(output_file, {"in_memory": True})
94 worksheet = workbook.add_worksheet("Kursdeltagere")
95
96 # Styles
97 format_header = workbook.add_format({
98 "bold": True,
99 "font_size": 10,
100 "font_name": "Arial",
101 "font_color": "#000000",
102 "bg_color": "#f8c700",
103 "border": 1,
104 "border_color": "#000000",
105 })
106
107 format_row = workbook.add_format({
108 "font_size": 10,
109 "font_name": "Arial",
110 "font_color": "#000000",
111 "bg_color": "#fff2cc",
112 "border": 1,
113 "border_color": "#000000",
114 })
115
116 # Set column width
117 worksheet.set_column("A:A", 40)
118 worksheet.set_column("B:B", 20)
119 worksheet.set_column("C:C", 15)
120 worksheet.set_column("D:D", 10)
121
122 # Header
123 worksheet.write(0, 0, "Navn", format_header)
124 worksheet.write(0, 1, "Brukernavn", format_header)
125 worksheet.write(0, 2, "Kortnummer", format_header)
126 worksheet.write(0, 3, "Dato", format_header)
127
128 for index, registration in enumerate(course_registrations):
129 worksheet.write(index + 1, 0, registration.name, format_row)
130 worksheet.write(index + 1, 1, registration.username, format_row)
131 worksheet.write(index + 1, 2, registration.card_number if registration.card_number is not None else "",
132 format_row)
133 worksheet.write(index + 1, 3, registration.date.strftime("%Y-%m-%d"), format_row)
134
135 workbook.close()
136 output_file.seek(0)
137
138 response = HttpResponse(output_file.read(),
139 content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
140
141 response['Content-Disposition'] = 'attachment; filename="Kursdeltagere.xlsx"'
142
143 return response
144
[end of make_queue/views/admin/course.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/make_queue/views/admin/course.py b/make_queue/views/admin/course.py
--- a/make_queue/views/admin/course.py
+++ b/make_queue/views/admin/course.py
@@ -58,7 +58,7 @@
class DeleteRegistrationView(PermissionRequiredMixin, DeleteView):
model = Printer3DCourse
permission_required = (
- "make_queue.delete_printer3d_course",
+ "make_queue.delete_printer3dcourse",
)
def get_success_url(self):
| {"golden_diff": "diff --git a/make_queue/views/admin/course.py b/make_queue/views/admin/course.py\n--- a/make_queue/views/admin/course.py\n+++ b/make_queue/views/admin/course.py\n@@ -58,7 +58,7 @@\n class DeleteRegistrationView(PermissionRequiredMixin, DeleteView):\n model = Printer3DCourse\n permission_required = (\n- \"make_queue.delete_printer3d_course\",\n+ \"make_queue.delete_printer3dcourse\",\n )\n \n def get_success_url(self):\n", "issue": "Fix delete permissions for course registration\n\n", "before_files": [{"content": "import io\n\nimport xlsxwriter\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView, View, CreateView, UpdateView, DeleteView\n\nfrom make_queue.forms import Printer3DCourseForm\nfrom make_queue.models.course import Printer3DCourse\n\n\nclass CourseView(TemplateView):\n template_name = \"make_queue/course/course_panel.html\"\n\n def get_context_data(self, **kwargs):\n context_data = super().get_context_data(**kwargs)\n context_data.update({\n \"registrations\": Printer3DCourse.objects.order_by(\"name\"),\n \"possible_statuses\": Printer3DCourse.STATUS_CHOICES,\n })\n return context_data\n\n\nclass CreateRegistrationView(PermissionRequiredMixin, CreateView):\n is_next = False\n model = Printer3DCourse\n form_class = Printer3DCourseForm\n template_name = \"make_queue/course/registration_create.html\"\n permission_required = (\n \"make_queue.add_printer3dcourse\",\n )\n\n def get_context_data(self, **kwargs):\n context_data = super().get_context_data(**kwargs)\n if self.is_next:\n context_data[\"is_next\"] = True\n return context_data\n\n def get_success_url(self):\n return reverse(\"create_course_registration_success\")\n\n\nclass EditRegistrationView(PermissionRequiredMixin, UpdateView):\n model = Printer3DCourse\n form_class = Printer3DCourseForm\n template_name = \"make_queue/course/registration_edit.html\"\n permission_required = (\n \"make_queue.change_printer3dcourse\",\n )\n\n def get_success_url(self):\n return reverse(\"course_panel\")\n\n\nclass DeleteRegistrationView(PermissionRequiredMixin, DeleteView):\n model = Printer3DCourse\n permission_required = (\n \"make_queue.delete_printer3d_course\",\n )\n\n def get_success_url(self):\n return reverse(\"course_panel\")\n\n\nclass BulkStatusUpdate(View):\n \"\"\"\n Provides a method for bulk updating the status of course registrations\n \"\"\"\n\n def post(self, request):\n status = request.POST.get(\"status\")\n registrations = list(map(int, request.POST.getlist(\"users\")))\n Printer3DCourse.objects.filter(pk__in=registrations).update(status=status)\n\n return redirect(\"course_panel\")\n\n\nclass CourseXLSXView(View):\n\n def post(self, request):\n search_string = request.POST.get(\"search_text\")\n status_filter = request.POST.get(\"status_filter\")\n\n course_registrations = Printer3DCourse.objects.filter(\n Q(username__icontains=search_string) | Q(name__icontains=search_string), status__icontains=status_filter)\n\n # Use an in-memory output file, to avoid having to clean up the disk\n output_file = io.BytesIO()\n\n workbook = xlsxwriter.Workbook(output_file, {\"in_memory\": True})\n worksheet = workbook.add_worksheet(\"Kursdeltagere\")\n\n # Styles\n format_header = workbook.add_format({\n \"bold\": True,\n \"font_size\": 10,\n \"font_name\": \"Arial\",\n \"font_color\": \"#000000\",\n \"bg_color\": 
\"#f8c700\",\n \"border\": 1,\n \"border_color\": \"#000000\",\n })\n\n format_row = workbook.add_format({\n \"font_size\": 10,\n \"font_name\": \"Arial\",\n \"font_color\": \"#000000\",\n \"bg_color\": \"#fff2cc\",\n \"border\": 1,\n \"border_color\": \"#000000\",\n })\n\n # Set column width\n worksheet.set_column(\"A:A\", 40)\n worksheet.set_column(\"B:B\", 20)\n worksheet.set_column(\"C:C\", 15)\n worksheet.set_column(\"D:D\", 10)\n\n # Header\n worksheet.write(0, 0, \"Navn\", format_header)\n worksheet.write(0, 1, \"Brukernavn\", format_header)\n worksheet.write(0, 2, \"Kortnummer\", format_header)\n worksheet.write(0, 3, \"Dato\", format_header)\n\n for index, registration in enumerate(course_registrations):\n worksheet.write(index + 1, 0, registration.name, format_row)\n worksheet.write(index + 1, 1, registration.username, format_row)\n worksheet.write(index + 1, 2, registration.card_number if registration.card_number is not None else \"\",\n format_row)\n worksheet.write(index + 1, 3, registration.date.strftime(\"%Y-%m-%d\"), format_row)\n\n workbook.close()\n output_file.seek(0)\n\n response = HttpResponse(output_file.read(),\n content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\")\n\n response['Content-Disposition'] = 'attachment; filename=\"Kursdeltagere.xlsx\"'\n\n return response\n", "path": "make_queue/views/admin/course.py"}]} | 1,958 | 109 |
gh_patches_debug_10211 | rasdani/github-patches | git_diff | google__clusterfuzz-189 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
linting in CI works differently than locally
I'm pretty sure the CI is on pylint 1.9.4.
See https://github.com/google/clusterfuzz/pull/185 for a discrepancy I noticed between running pylint locally and running it in CI.
When I upgraded my local copy of pylint to 1.9.4, I was able to reproduce the issue that shows up in CI.
</issue>
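One low-effort guard against this class of drift is to assert the linter version before linting, so a mismatch fails loudly instead of silently producing different results. This is only a sketch of that idea, not the fix that was merged, and the pinned version is an assumption:

```python
# Sketch: refuse to lint if the local pylint differs from the version CI
# runs, surfacing discrepancies like this one before code is pushed.
import subprocess
import sys

EXPECTED_PYLINT = "1.9.4"  # assumed to match the CI environment


def require_ci_pylint():
    output = subprocess.check_output(["pylint", "--version"]).decode()
    if EXPECTED_PYLINT not in output:
        sys.exit(
            "pylint %s is required to match CI; found:\n%s"
            % (EXPECTED_PYLINT, output)
        )
```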
<code>
[start of src/local/butler/lint.py]
1 # Copyright 2019 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Lint changed code in current branch."""
15
16 import os
17 import sys
18
19 from local.butler import common
20
21
22 def execute(_):
23 """Lint changed code."""
24 if "GOOGLE_CLOUDBUILD" in os.environ:
25 # Explicitly compare against master if we're running on the CI
26 _, output = common.execute('git diff --name-only master FETCH_HEAD')
27 else:
28 _, output = common.execute('git diff --name-only FETCH_HEAD')
29
30 py_changed_file_paths = [
31 f for f in output.splitlines() if f.endswith('.py') and
32 # Exclude auto-generated files.
33 not f.endswith('_pb2.py') and not f.endswith('_pb2_grpc.py')
34 ]
35 go_changed_file_paths = [f for f in output.splitlines() if f.endswith('.go')]
36
37 for file_path in py_changed_file_paths:
38 if os.path.exists(file_path):
39 common.execute('pylint ' + file_path)
40 common.execute('yapf -d ' + file_path)
41
42 golint_path = os.path.join('local', 'bin', 'golint')
43 for file_path in go_changed_file_paths:
44 if os.path.exists(file_path):
45 common.execute(golint_path + ' ' + file_path)
46
47 _, output = common.execute('gofmt -d ' + file_path)
48 if output.strip():
49 sys.exit(1)
50
[end of src/local/butler/lint.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/local/butler/lint.py b/src/local/butler/lint.py
--- a/src/local/butler/lint.py
+++ b/src/local/butler/lint.py
@@ -16,11 +16,15 @@
import os
import sys
+from local.butler import appengine
from local.butler import common
def execute(_):
"""Lint changed code."""
+ pythonpath = os.getenv('PYTHONPATH', '')
+ os.environ['PYTHONPATH'] = appengine.find_sdk_path() + ':' + pythonpath
+
if "GOOGLE_CLOUDBUILD" in os.environ:
# Explicitly compare against master if we're running on the CI
_, output = common.execute('git diff --name-only master FETCH_HEAD')
| {"golden_diff": "diff --git a/src/local/butler/lint.py b/src/local/butler/lint.py\n--- a/src/local/butler/lint.py\n+++ b/src/local/butler/lint.py\n@@ -16,11 +16,15 @@\n import os\n import sys\n \n+from local.butler import appengine\n from local.butler import common\n \n \n def execute(_):\n \"\"\"Lint changed code.\"\"\"\n+ pythonpath = os.getenv('PYTHONPATH', '')\n+ os.environ['PYTHONPATH'] = appengine.find_sdk_path() + ':' + pythonpath\n+\n if \"GOOGLE_CLOUDBUILD\" in os.environ:\n # Explicitly compare against master if we're running on the CI\n _, output = common.execute('git diff --name-only master FETCH_HEAD')\n", "issue": "linting in CI works differently than locally\nI'm pretty sure it is pylint 1.9.4.\r\nSee https://github.com/google/clusterfuzz/pull/185 for a discrepancy I noticed between running pylint locally and running it in CI.\r\n\r\nWhen I upgraded my local copy of pylint to 1.9.4 I was able to discover the issue showing up in CI.\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Lint changed code in current branch.\"\"\"\n\nimport os\nimport sys\n\nfrom local.butler import common\n\n\ndef execute(_):\n \"\"\"Lint changed code.\"\"\"\n if \"GOOGLE_CLOUDBUILD\" in os.environ:\n # Explicitly compare against master if we're running on the CI\n _, output = common.execute('git diff --name-only master FETCH_HEAD')\n else:\n _, output = common.execute('git diff --name-only FETCH_HEAD')\n\n py_changed_file_paths = [\n f for f in output.splitlines() if f.endswith('.py') and\n # Exclude auto-generated files.\n not f.endswith('_pb2.py') and not f.endswith('_pb2_grpc.py')\n ]\n go_changed_file_paths = [f for f in output.splitlines() if f.endswith('.go')]\n\n for file_path in py_changed_file_paths:\n if os.path.exists(file_path):\n common.execute('pylint ' + file_path)\n common.execute('yapf -d ' + file_path)\n\n golint_path = os.path.join('local', 'bin', 'golint')\n for file_path in go_changed_file_paths:\n if os.path.exists(file_path):\n common.execute(golint_path + ' ' + file_path)\n\n _, output = common.execute('gofmt -d ' + file_path)\n if output.strip():\n sys.exit(1)\n", "path": "src/local/butler/lint.py"}]} | 1,143 | 172 |
gh_patches_debug_281 | rasdani/github-patches | git_diff | vega__altair-3387 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
minimum pyarrow version enforced even if pandas is installed
The error we are facing in an environment says:
```python
RuntimeError: The pyarrow package must be version 11.0.0 or greater. Found version 6.0.1
```
It is caused by these lines:
https://github.com/altair-viz/altair/blob/main/altair/utils/core.py#L591-L592
```python
# if data is specified and type is not, infer type from data
if "type" not in attrs:
    if pyarrow_available() and data is not None and isinstance(data, DataFrameLike):
        ...
    elif isinstance(data, pd.DataFrame):
        # Fallback if pyarrow is not installed or if pandas is older than 1.5
```
In that particular environment, pandas is installed by default and we are not able to upgrade pyarrow.
Now the Altair specification errors out, because the code never tries the pandas approach once it has found a pyarrow version that is too old.
</issue>
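In other words, the availability probe raises where the caller only expects a boolean. One way to make it degrade gracefully is to treat an unsupported pyarrow version the same as a missing one; the sketch below shows that pattern using the helper names from the file that follows:

```python
# Sketch: report pyarrow as unavailable on *either* failure mode, so the
# caller can fall back to the pandas code path instead of crashing.
def pyarrow_available() -> bool:
    try:
        import_pyarrow_interchange()  # may raise ImportError or RuntimeError
        return True
    except (ImportError, RuntimeError):
        return False
```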
<code>
[start of altair/utils/_importers.py]
1 from types import ModuleType
2 from packaging.version import Version
3 from importlib.metadata import version as importlib_version
4
5
6 def import_vegafusion() -> ModuleType:
7 min_version = "1.5.0"
8 try:
9 version = importlib_version("vegafusion")
10 embed_version = importlib_version("vegafusion-python-embed")
11 if version != embed_version or Version(version) < Version(min_version):
12 raise RuntimeError(
13 "The versions of the vegafusion and vegafusion-python-embed packages must match\n"
14 f"and must be version {min_version} or greater.\n"
15 f"Found:\n"
16 f" - vegafusion=={version}\n"
17 f" - vegafusion-python-embed=={embed_version}\n"
18 )
19 import vegafusion as vf # type: ignore
20
21 return vf
22 except ImportError as err:
23 raise ImportError(
24 'The "vegafusion" data transformer and chart.transformed_data feature requires\n'
25 f"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\n"
26 "These can be installed with pip using:\n"
27 f' pip install "vegafusion[embed]>={min_version}"\n'
28 "Or with conda using:\n"
29 f' conda install -c conda-forge "vegafusion-python-embed>={min_version}" '
30 f'"vegafusion>={min_version}"\n\n'
31 f"ImportError: {err.args[0]}"
32 ) from err
33
34
35 def import_vl_convert() -> ModuleType:
36 min_version = "1.3.0"
37 try:
38 version = importlib_version("vl-convert-python")
39 if Version(version) < Version(min_version):
40 raise RuntimeError(
41 f"The vl-convert-python package must be version {min_version} or greater. "
42 f"Found version {version}"
43 )
44 import vl_convert as vlc
45
46 return vlc
47 except ImportError as err:
48 raise ImportError(
49 f"The vl-convert Vega-Lite compiler and file export feature requires\n"
50 f"version {min_version} or greater of the 'vl-convert-python' package. \n"
51 f"This can be installed with pip using:\n"
52 f' pip install "vl-convert-python>={min_version}"\n'
53 "or conda:\n"
54 f' conda install -c conda-forge "vl-convert-python>={min_version}"\n\n'
55 f"ImportError: {err.args[0]}"
56 ) from err
57
58
59 def vl_version_for_vl_convert() -> str:
60 from ..vegalite import SCHEMA_VERSION
61
62 # Compute VlConvert's vl_version string (of the form 'v5_2')
63 # from SCHEMA_VERSION (of the form 'v5.2.0')
64 return "_".join(SCHEMA_VERSION.split(".")[:2])
65
66
67 def import_pyarrow_interchange() -> ModuleType:
68 min_version = "11.0.0"
69 try:
70 version = importlib_version("pyarrow")
71
72 if Version(version) < Version(min_version):
73 raise RuntimeError(
74 f"The pyarrow package must be version {min_version} or greater. "
75 f"Found version {version}"
76 )
77 import pyarrow.interchange as pi
78
79 return pi
80 except ImportError as err:
81 raise ImportError(
82 f"Usage of the DataFrame Interchange Protocol requires\n"
83 f"version {min_version} or greater of the pyarrow package. \n"
84 f"This can be installed with pip using:\n"
85 f' pip install "pyarrow>={min_version}"\n'
86 "or conda:\n"
87 f' conda install -c conda-forge "pyarrow>={min_version}"\n\n'
88 f"ImportError: {err.args[0]}"
89 ) from err
90
91
92 def pyarrow_available() -> bool:
93 try:
94 import_pyarrow_interchange()
95 return True
96 except ImportError:
97 return False
98
[end of altair/utils/_importers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/altair/utils/_importers.py b/altair/utils/_importers.py
--- a/altair/utils/_importers.py
+++ b/altair/utils/_importers.py
@@ -93,5 +93,5 @@
try:
import_pyarrow_interchange()
return True
- except ImportError:
+ except (ImportError, RuntimeError):
return False
| {"golden_diff": "diff --git a/altair/utils/_importers.py b/altair/utils/_importers.py\n--- a/altair/utils/_importers.py\n+++ b/altair/utils/_importers.py\n@@ -93,5 +93,5 @@\n try:\n import_pyarrow_interchange()\n return True\n- except ImportError:\n+ except (ImportError, RuntimeError):\n return False\n", "issue": "minimum pyarrow version enforced even if pandas is installed\nThe error we are facing in an environment says:\r\n```python\r\nRuntimeError: The pyarrow package must be version 11.0.0 or greater. Found version 6.0.1\r\n```\r\n\r\nAnd is caused by these lines:\r\nhttps://github.com/altair-viz/altair/blob/main/altair/utils/core.py#L591-L592\r\n\r\n```python\r\n # if data is specified and type is not, infer type from data\r\n if \"type\" not in attrs:\r\n if pyarrow_available() and data is not None and isinstance(data, DataFrameLike):\r\n ...\r\n\r\n elif isinstance(data, pd.DataFrame):\r\n # Fallback if pyarrow is not installed or if pandas is older than 1.5\r\n```\r\nIn that particular environment pandas is installed by default and we are not able to upgrade pyarrow. \r\n\r\nNow the altair specifications errors as the code never tries the pandas approach as it has found a pyarrow version that is too old. \r\n\n", "before_files": [{"content": "from types import ModuleType\nfrom packaging.version import Version\nfrom importlib.metadata import version as importlib_version\n\n\ndef import_vegafusion() -> ModuleType:\n min_version = \"1.5.0\"\n try:\n version = importlib_version(\"vegafusion\")\n embed_version = importlib_version(\"vegafusion-python-embed\")\n if version != embed_version or Version(version) < Version(min_version):\n raise RuntimeError(\n \"The versions of the vegafusion and vegafusion-python-embed packages must match\\n\"\n f\"and must be version {min_version} or greater.\\n\"\n f\"Found:\\n\"\n f\" - vegafusion=={version}\\n\"\n f\" - vegafusion-python-embed=={embed_version}\\n\"\n )\n import vegafusion as vf # type: ignore\n\n return vf\n except ImportError as err:\n raise ImportError(\n 'The \"vegafusion\" data transformer and chart.transformed_data feature requires\\n'\n f\"version {min_version} or greater of the 'vegafusion-python-embed' and 'vegafusion' packages.\\n\"\n \"These can be installed with pip using:\\n\"\n f' pip install \"vegafusion[embed]>={min_version}\"\\n'\n \"Or with conda using:\\n\"\n f' conda install -c conda-forge \"vegafusion-python-embed>={min_version}\" '\n f'\"vegafusion>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef import_vl_convert() -> ModuleType:\n min_version = \"1.3.0\"\n try:\n version = importlib_version(\"vl-convert-python\")\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The vl-convert-python package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import vl_convert as vlc\n\n return vlc\n except ImportError as err:\n raise ImportError(\n f\"The vl-convert Vega-Lite compiler and file export feature requires\\n\"\n f\"version {min_version} or greater of the 'vl-convert-python' package. 
\\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"vl-convert-python>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"vl-convert-python>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef vl_version_for_vl_convert() -> str:\n from ..vegalite import SCHEMA_VERSION\n\n # Compute VlConvert's vl_version string (of the form 'v5_2')\n # from SCHEMA_VERSION (of the form 'v5.2.0')\n return \"_\".join(SCHEMA_VERSION.split(\".\")[:2])\n\n\ndef import_pyarrow_interchange() -> ModuleType:\n min_version = \"11.0.0\"\n try:\n version = importlib_version(\"pyarrow\")\n\n if Version(version) < Version(min_version):\n raise RuntimeError(\n f\"The pyarrow package must be version {min_version} or greater. \"\n f\"Found version {version}\"\n )\n import pyarrow.interchange as pi\n\n return pi\n except ImportError as err:\n raise ImportError(\n f\"Usage of the DataFrame Interchange Protocol requires\\n\"\n f\"version {min_version} or greater of the pyarrow package. \\n\"\n f\"This can be installed with pip using:\\n\"\n f' pip install \"pyarrow>={min_version}\"\\n'\n \"or conda:\\n\"\n f' conda install -c conda-forge \"pyarrow>={min_version}\"\\n\\n'\n f\"ImportError: {err.args[0]}\"\n ) from err\n\n\ndef pyarrow_available() -> bool:\n try:\n import_pyarrow_interchange()\n return True\n except ImportError:\n return False\n", "path": "altair/utils/_importers.py"}]} | 1,856 | 89 |
gh_patches_debug_2526 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1049 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix Fixture use in pytest
# Description
In pytest `v4.0.0` the [direct call of a fixture results in an error](https://travis-ci.org/diana-hep/pyhf/jobs/455364238#L661-L669).
```
==================================== ERRORS ====================================
__________________ ERROR collecting tests/test_validation.py ___________________
tests/test_validation.py:13: in <module>
def spec_1bin_shapesys(source=source_1bin_example1()):
E _pytest.warning_types.RemovedInPytest4Warning: Fixture "source_1bin_example1" called directly. Fixtures are not meant to be called directly, are created automatically when test functions request them as parameters. See https://docs.pytest.org/en/latest/fixture.html for more information.
__________________ ERROR collecting tests/test_validation.py ___________________
tests/test_validation.py:13: in <module>
def spec_1bin_shapesys(source=source_1bin_example1()):
E _pytest.warning_types.RemovedInPytest4Warning: Fixture "source_1bin_example1" called directly. Fixtures are not meant to be called directly, are created automatically when test functions request them as parameters. See https://docs.pytest.org/en/latest/fixture.html for more information.
```
This requires changing the way pytest is used a bit.
This was noticed while preparing PR #369.
# Checklist
- [x] Run `git fetch` to get the most up to date version of `master`
- [x] Searched through existing Issues to confirm this is not a duplicate issue
- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue
</issue>
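For reference, pytest 4 requires fixtures to be requested by parameter name rather than called directly; a minimal sketch of the migration, with hypothetical payloads standing in for the real validation data:

```python
import pytest


@pytest.fixture
def source_1bin_example1():
    return {"bindata": {"data": [120.0, 180.0]}}  # hypothetical payload

# Before (RemovedInPytest4Warning / error): the fixture is called directly
# at collection time:
#
#     def spec_1bin_shapesys(source=source_1bin_example1()):
#         ...

# After: declare the fixture as a parameter and let pytest inject its value.
@pytest.fixture
def spec_1bin_shapesys(source_1bin_example1):
    source = source_1bin_example1
    return {"source": source, "channels": []}  # hypothetical spec shape
```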
<code>
[start of setup.py]
1 from setuptools import setup
2
3 extras_require = {
4 'shellcomplete': ['click_completion'],
5 'tensorflow': [
6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major
7 'tensorflow-probability~=0.10.0',
8 ],
9 'torch': ['torch~=1.2'],
10 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],
11 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes
12 'minuit': ['iminuit~=1.4,>=1.4.3'], # Use "name" keyword in MINUIT optimizer
13 }
14 extras_require['backends'] = sorted(
15 set(
16 extras_require['tensorflow']
17 + extras_require['torch']
18 + extras_require['jax']
19 + extras_require['minuit']
20 )
21 )
22 extras_require['contrib'] = sorted(set(['matplotlib']))
23 extras_require['lint'] = sorted(set(['pyflakes', 'black']))
24
25 extras_require['test'] = sorted(
26 set(
27 extras_require['backends']
28 + extras_require['xmlio']
29 + extras_require['contrib']
30 + extras_require['shellcomplete']
31 + [
32 'pytest~=3.5',
33 'pytest-cov>=2.5.1',
34 'pytest-mock',
35 'pytest-benchmark[histogram]',
36 'pytest-console-scripts',
37 'pytest-mpl',
38 'pydocstyle',
39 'coverage>=4.0', # coveralls
40 'papermill~=2.0',
41 'nteract-scrapbook~=0.2',
42 'jupyter',
43 'uproot~=3.3',
44 'graphviz',
45 'jsonpatch',
46 ]
47 )
48 )
49 extras_require['docs'] = sorted(
50 set(
51 [
52 'sphinx>=3.1.2',
53 'sphinxcontrib-bibtex',
54 'sphinx-click',
55 'sphinx_rtd_theme',
56 'nbsphinx',
57 'ipywidgets',
58 'sphinx-issues',
59 'sphinx-copybutton>0.2.9',
60 ]
61 )
62 )
63 extras_require['develop'] = sorted(
64 set(
65 extras_require['docs']
66 + extras_require['lint']
67 + extras_require['test']
68 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']
69 )
70 )
71 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
72
73
74 setup(
75 extras_require=extras_require,
76 use_scm_version=lambda: {'local_scheme': lambda version: ''},
77 )
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,7 @@
+ extras_require['contrib']
+ extras_require['shellcomplete']
+ [
- 'pytest~=3.5',
+ 'pytest~=6.0',
'pytest-cov>=2.5.1',
'pytest-mock',
'pytest-benchmark[histogram]',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -29,7 +29,7 @@\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n- 'pytest~=3.5',\n+ 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n", "issue": "Fix Fixture use in pytest\n# Description\r\n\r\nIn pytest `v4.0.0` the [direct call of a fixture results in an error](https://travis-ci.org/diana-hep/pyhf/jobs/455364238#L661-L669). \r\n\r\n```\r\n==================================== ERRORS ====================================\r\n__________________ ERROR collecting tests/test_validation.py ___________________\r\ntests/test_validation.py:13: in <module>\r\n def spec_1bin_shapesys(source=source_1bin_example1()):\r\nE _pytest.warning_types.RemovedInPytest4Warning: Fixture \"source_1bin_example1\" called directly. Fixtures are not meant to be called directly, are created automatically when test functions request them as parameters. See https://docs.pytest.org/en/latest/fixture.html for more information.\r\n__________________ ERROR collecting tests/test_validation.py ___________________\r\ntests/test_validation.py:13: in <module>\r\n def spec_1bin_shapesys(source=source_1bin_example1()):\r\nE _pytest.warning_types.RemovedInPytest4Warning: Fixture \"source_1bin_example1\" called directly. Fixtures are not meant to be called directly, are created automatically when test functions request them as parameters. See https://docs.pytest.org/en/latest/fixture.html for more information.\r\n```\r\n\r\nThis requires changing the way that pytest is used a bit.\r\n\r\nThis was noticed in preparation of PR #369 \r\n\r\n# Checklist\r\n\r\n- [x] Run `git fetch` to get the most up to date version of `master`\r\n- [x] Searched through existing Issues to confirm this is not a duplicate issue\r\n- [x] Filled out the Description, Expected Behavior, Actual Behavior, and Steps to Reproduce sections above or have edited/removed them in a way that fully describes the issue\r\n\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes\n 'minuit': ['iminuit~=1.4,>=1.4.3'], # Use \"name\" keyword in MINUIT optimizer\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n 
extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]} | 1,652 | 96 |
gh_patches_debug_345 | rasdani/github-patches | git_diff | NVIDIA__apex-564 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
RuntimeError: "GeluCUDAKernelImpl" not implemented for 'Half'
PyTorch 1.2 introduced the `gelu` activation function. Unfortunately, it leads to fatal errors when used with AMP.
Trace (`self.activation` is `gelu`):
```
Traceback (most recent call last):
File "predict.py", line 282, in <module>
predictor.predict()
File "predict.py", line 74, in predict
fig = trainer.train()
File "/home/bram/Python/projects/transformer-classifiers/transformer_classifiers/TransformerTrainer.py", line 232, in train
self._process('train', epoch)
File "/home/bram/Python/projects/transformer-classifiers/transformer_classifiers/TransformerTrainer.py", line 124, in _process
preds = self.model(input_ids, attention_mask=input_mask)
File "/home/bram/.local/share/virtualenvs/transformer-classifiers-x27iJBv7/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/home/bram/.local/share/virtualenvs/transformer-classifiers-x27iJBv7/lib/python3.7/site-packages/torch/nn/parallel/distributed.py", line 442, in forward
output = self.module(*inputs[0], **kwargs[0])
File "/home/bram/.local/share/virtualenvs/transformer-classifiers-x27iJBv7/lib/python3.7/site-packages/torch/nn/modules/module.py", line 541, in __call__
result = self.forward(*input, **kwargs)
File "/home/bram/Python/projects/transformer-classifiers/transformer_classifiers/models.py", line 140, in forward
cls_output = self.activation(cls_output)
File "/home/bram/.local/share/virtualenvs/transformer-classifiers-x27iJBv7/lib/python3.7/site-packages/torch/nn/functional.py", line 1126, in gelu
return torch._C._nn.gelu(input)
RuntimeError: "GeluCUDAKernelImpl" not implemented for 'Half'
```
</issue>
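Besides teaching the override lists about `gelu`, apex exposes registration hooks that let user code mark a function for fp32 execution; the sketch below assumes that route and a CUDA-capable environment:

```python
import torch
from apex import amp

# Registering before amp.initialize casts F.gelu inputs to fp32 under amp,
# sidestepping the missing Half kernel.
amp.register_float_function(torch.nn.functional, "gelu")

model = torch.nn.Linear(8, 8).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
```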
<code>
[start of apex/amp/lists/functional_overrides.py]
1
2 # TODO: think about the following two. They do weird things.
3 # - torch.nn.utils.clip_grad (but it should always be fp32 anyway)
4 # - torch.nn.utils.weight_norm
5
6 # Notes:
7 # F.instance_norm uses batch_norm internally. Which correctly handles
8 # fp16 in/out with fp32 weights. So we shouldn't do anything for
9 # either of these.
10 # F.normalize calls `input.norm()` internally, so it's redundant, but
11 # kept here in case impl. changes.
12 # F.cosine_similarity is same: calls `x.norm()` internally.
13
14 import torch.nn.functional
15
16 MODULE = torch.nn.functional
17
18 FP16_FUNCS = [
19 'conv1d',
20 'conv2d',
21 'conv3d',
22 'conv_transpose1d',
23 'conv_transpose2d',
24 'conv_transpose3d',
25 'conv_tbc', # Undocumented / maybe new?
26 'linear',
27 ]
28
29 FP32_FUNCS = [
30
31 # Interpolation/Upsampling TODO: Remove for 1.2
32 'interpolate',
33 'grid_sample',
34
35 # Pointwise
36 'softplus',
37 'softmin',
38 'log_softmax',
39 'softmax',
40
41 # Normalization
42 'layer_norm',
43 'group_norm',
44 'local_response_norm',
45 'normalize',
46 'cosine_similarity',
47
48 # Loss functions
49 # TODO: which of these can be fp16?
50 'poisson_nll_loss',
51 'cosine_embedding_loss',
52 'cross_entropy',
53 'hinge_embedding_loss',
54 'kl_div',
55 'l1_loss',
56 'mse_loss',
57 'margin_ranking_loss',
58 'multilabel_margin_loss',
59 'multilabel_soft_margin_loss',
60 'multi_margin_loss',
61 'nll_loss',
62 'binary_cross_entropy_with_logits',
63 'smooth_l1_loss',
64 'soft_margin_loss',
65 'triplet_margin_loss'
66 ]
67
68 BANNED_FUNCS = [
69 ('binary_cross_entropy',
70 ("\namp does not work out-of-the-box with `F.binary_cross_entropy` or `torch.nn.BCELoss.` "
71 "It requires that the output of the previous function be already a FloatTensor. \n\n"
72 "Most models have a Sigmoid right before BCELoss. In that case, you can use\n"
73 " torch.nn.BCEWithLogitsLoss\nto combine Sigmoid+BCELoss into a single layer "
74 "that is compatible with amp.\nAnother option is to add\n"
75 " amp.register_float_function(torch, 'sigmoid')\nbefore calling `amp.init()`.\n"
76 "If you _really_ know what you are doing, you can disable this warning by passing "
77 "allow_banned=True to `amp.init()`."))
78 ]
79
[end of apex/amp/lists/functional_overrides.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apex/amp/lists/functional_overrides.py b/apex/amp/lists/functional_overrides.py
--- a/apex/amp/lists/functional_overrides.py
+++ b/apex/amp/lists/functional_overrides.py
@@ -37,7 +37,8 @@
'softmin',
'log_softmax',
'softmax',
-
+ 'gelu',
+
# Normalization
'layer_norm',
'group_norm',
| {"golden_diff": "diff --git a/apex/amp/lists/functional_overrides.py b/apex/amp/lists/functional_overrides.py\n--- a/apex/amp/lists/functional_overrides.py\n+++ b/apex/amp/lists/functional_overrides.py\n@@ -37,7 +37,8 @@\n 'softmin',\n 'log_softmax',\n 'softmax',\n-\n+ 'gelu',\n+ \n # Normalization\n 'layer_norm',\n 'group_norm',\n", "issue": "RuntimeError: \"GeluCUDAKernelImpl\" not implemented for 'Half'\nPyTorch 1.2 introduced the `gelu` activation function. Unfortunately, this leads to terminal errors when using with AMP.\r\n\r\nTrace (`self.activation` is `gelu`): \r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"predict.py\", line 282, in <module>\r\n predictor.predict()\r\n File \"predict.py\", line 74, in predict\r\n fig = trainer.train()\r\n File \"/home/bram/Python/projects/transformer-classifiers/transformer_classifiers/TransformerTrainer.py\", line 232, in train\r\n self._process('train', epoch)\r\n File \"/home/bram/Python/projects/transformer-classifiers/transformer_classifiers/TransformerTrainer.py\", line 124, in _process\r\n preds = self.model(input_ids, attention_mask=input_mask)\r\n File \"/home/bram/.local/share/virtualenvs/transformer-classifiers-x27iJBv7/lib/python3.7/site-packages/torch/nn/modules/module.py\", line 541, in __call__\r\n result = self.forward(*input, **kwargs)\r\n File \"/home/bram/.local/share/virtualenvs/transformer-classifiers-x27iJBv7/lib/python3.7/site-packages/torch/nn/parallel/distributed.py\", line 442, in forward\r\n output = self.module(*inputs[0], **kwargs[0])\r\n File \"/home/bram/.local/share/virtualenvs/transformer-classifiers-x27iJBv7/lib/python3.7/site-packages/torch/nn/modules/module.py\", line 541, in __call__\r\n result = self.forward(*input, **kwargs)\r\n File \"/home/bram/Python/projects/transformer-classifiers/transformer_classifiers/models.py\", line 140, in forward\r\n cls_output = self.activation(cls_output)\r\n File \"/home/bram/.local/share/virtualenvs/transformer-classifiers-x27iJBv7/lib/python3.7/site-packages/torch/nn/functional.py\", line 1126, in gelu\r\n return torch._C._nn.gelu(input)\r\nRuntimeError: \"GeluCUDAKernelImpl\" not implemented for 'Half'\r\n```\n", "before_files": [{"content": "\n# TODO: think about the following two. They do weird things.\n# - torch.nn.utils.clip_grad (but it should always be fp32 anyway)\n# - torch.nn.utils.weight_norm\n\n# Notes:\n# F.instance_norm uses batch_norm internally. Which correctly handles\n# fp16 in/out with fp32 weights. So we shouldn't do anything for\n# either of these.\n# F.normalize calls `input.norm()` internally, so it's redundant, but\n# kept here in case impl. 
changes.\n# F.cosine_similarity is same: calls `x.norm()` internally.\n\nimport torch.nn.functional\n\nMODULE = torch.nn.functional\n\nFP16_FUNCS = [\n 'conv1d',\n 'conv2d',\n 'conv3d',\n 'conv_transpose1d',\n 'conv_transpose2d',\n 'conv_transpose3d',\n 'conv_tbc', # Undocumented / maybe new?\n 'linear',\n]\n\nFP32_FUNCS = [\n\n # Interpolation/Upsampling TODO: Remove for 1.2\n 'interpolate',\n 'grid_sample',\n\n # Pointwise\n 'softplus',\n 'softmin',\n 'log_softmax',\n 'softmax',\n\n # Normalization\n 'layer_norm',\n 'group_norm',\n 'local_response_norm',\n 'normalize',\n 'cosine_similarity',\n\n # Loss functions\n # TODO: which of these can be fp16?\n 'poisson_nll_loss',\n 'cosine_embedding_loss',\n 'cross_entropy',\n 'hinge_embedding_loss',\n 'kl_div',\n 'l1_loss',\n 'mse_loss',\n 'margin_ranking_loss',\n 'multilabel_margin_loss',\n 'multilabel_soft_margin_loss',\n 'multi_margin_loss',\n 'nll_loss',\n 'binary_cross_entropy_with_logits',\n 'smooth_l1_loss',\n 'soft_margin_loss',\n 'triplet_margin_loss'\n]\n\nBANNED_FUNCS = [\n ('binary_cross_entropy',\n (\"\\namp does not work out-of-the-box with `F.binary_cross_entropy` or `torch.nn.BCELoss.` \"\n \"It requires that the output of the previous function be already a FloatTensor. \\n\\n\"\n \"Most models have a Sigmoid right before BCELoss. In that case, you can use\\n\"\n \" torch.nn.BCEWithLogitsLoss\\nto combine Sigmoid+BCELoss into a single layer \"\n \"that is compatible with amp.\\nAnother option is to add\\n\"\n \" amp.register_float_function(torch, 'sigmoid')\\nbefore calling `amp.init()`.\\n\"\n \"If you _really_ know what you are doing, you can disable this warning by passing \"\n \"allow_banned=True to `amp.init()`.\"))\n]\n", "path": "apex/amp/lists/functional_overrides.py"}]} | 1,820 | 103 |
gh_patches_debug_23388 | rasdani/github-patches | git_diff | cal-itp__benefits-1550 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: Grid-width issue
Related to #1545
Almost all instances of `col-lg-10` in the app should now be `col-lg-8`.
</issue>
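For reference, Bootstrap's grid is 12 columns wide, so `col-lg-8` renders a block at two-thirds of the container on large screens. A sketch of the kind of in-place change being requested, inside the form `__init__` and using class strings from the file below:

```python
# Before: field spans 9 columns with a 1-column offset (10 columns of footprint)
self.classes = "offset-lg-1 col-lg-9"
# After: field spans 8 columns per the new grid spec
self.classes = "col-lg-8"
```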
<code>
[start of benefits/eligibility/forms.py]
1 """
2 The eligibility application: Form definition for the eligibility verification flow.
3 """
4 import logging
5
6 from django import forms
7 from django.utils.translation import gettext_lazy as _
8
9 from benefits.core import models, recaptcha, widgets
10
11
12 logger = logging.getLogger(__name__)
13
14
15 class EligibilityVerifierSelectionForm(forms.Form):
16 """Form to capture eligibility verifier selection."""
17
18 action_url = "eligibility:index"
19 id = "form-verifier-selection"
20 method = "POST"
21
22 verifier = forms.ChoiceField(label="", widget=widgets.VerifierRadioSelect)
23 # sets label to empty string so the radio_select template can override the label style
24 submit_value = _("eligibility.buttons.choose")
25
26 def __init__(self, agency: models.TransitAgency, *args, **kwargs):
27 super().__init__(*args, **kwargs)
28 verifiers = agency.eligibility_verifiers.all()
29
30 self.classes = "offset-lg-1 col-lg-9"
31 # second element is not used since we render the whole label using selection_label_template,
32 # therefore set to None
33 self.fields["verifier"].choices = [(v.id, None) for v in verifiers]
34 self.fields["verifier"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}
35
36 def clean(self):
37 if not recaptcha.verify(self.data):
38 raise forms.ValidationError("reCAPTCHA failed")
39
40
41 class EligibilityVerificationForm(forms.Form):
42 """Form to collect eligibility verification details."""
43
44 action_url = "eligibility:confirm"
45 id = "form-eligibility-verification"
46 method = "POST"
47
48 submit_value = _("eligibility.forms.confirm.submit")
49 submitting_value = _("eligibility.forms.confirm.submitting")
50
51 _error_messages = {
52 "invalid": _("eligibility.forms.confirm.errors.invalid"),
53 "missing": _("eligibility.forms.confirm.errors.missing"),
54 }
55
56 def __init__(self, verifier: models.EligibilityVerifier, *args, **kwargs):
57 super().__init__(*args, **kwargs)
58
59 self.classes = "offset-lg-3 col-lg-6"
60 sub_widget = widgets.FormControlTextInput(placeholder=verifier.form_sub_placeholder)
61 if verifier.form_sub_pattern:
62 sub_widget.attrs.update({"pattern": verifier.form_sub_pattern})
63 if verifier.form_input_mode:
64 sub_widget.attrs.update({"inputmode": verifier.form_input_mode})
65 if verifier.form_max_length:
66 sub_widget.attrs.update({"maxlength": verifier.form_max_length})
67
68 self.fields["sub"] = forms.CharField(
69 label=_(verifier.form_sub_label),
70 widget=sub_widget,
71 help_text=_(verifier.form_sub_help_text),
72 )
73
74 name_widget = widgets.FormControlTextInput(placeholder=verifier.form_name_placeholder)
75 if verifier.form_name_max_length:
76 name_widget.attrs.update({"maxlength": verifier.form_name_max_length})
77
78 self.fields["name"] = forms.CharField(
79 label=_(verifier.form_name_label), widget=name_widget, help_text=_(verifier.form_name_help_text)
80 )
81
82 def clean(self):
83 if not recaptcha.verify(self.data):
84 raise forms.ValidationError("reCAPTCHA failed")
85
[end of benefits/eligibility/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py
--- a/benefits/eligibility/forms.py
+++ b/benefits/eligibility/forms.py
@@ -27,7 +27,7 @@
super().__init__(*args, **kwargs)
verifiers = agency.eligibility_verifiers.all()
- self.classes = "offset-lg-1 col-lg-9"
+ self.classes = "col-lg-8"
# second element is not used since we render the whole label using selection_label_template,
# therefore set to None
self.fields["verifier"].choices = [(v.id, None) for v in verifiers]
@@ -56,7 +56,7 @@
def __init__(self, verifier: models.EligibilityVerifier, *args, **kwargs):
super().__init__(*args, **kwargs)
- self.classes = "offset-lg-3 col-lg-6"
+ self.classes = "col-lg-6"
sub_widget = widgets.FormControlTextInput(placeholder=verifier.form_sub_placeholder)
if verifier.form_sub_pattern:
sub_widget.attrs.update({"pattern": verifier.form_sub_pattern})
| {"golden_diff": "diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py\n--- a/benefits/eligibility/forms.py\n+++ b/benefits/eligibility/forms.py\n@@ -27,7 +27,7 @@\n super().__init__(*args, **kwargs)\n verifiers = agency.eligibility_verifiers.all()\n \n- self.classes = \"offset-lg-1 col-lg-9\"\n+ self.classes = \"col-lg-8\"\n # second element is not used since we render the whole label using selection_label_template,\n # therefore set to None\n self.fields[\"verifier\"].choices = [(v.id, None) for v in verifiers]\n@@ -56,7 +56,7 @@\n def __init__(self, verifier: models.EligibilityVerifier, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \n- self.classes = \"offset-lg-3 col-lg-6\"\n+ self.classes = \"col-lg-6\"\n sub_widget = widgets.FormControlTextInput(placeholder=verifier.form_sub_placeholder)\n if verifier.form_sub_pattern:\n sub_widget.attrs.update({\"pattern\": verifier.form_sub_pattern})\n", "issue": "Bug: Grid-width issue\nRelated to #1545 \r\n\r\nAlmost all instances of `col-lg-10` app should now be `col-lg-8`.\n", "before_files": [{"content": "\"\"\"\nThe eligibility application: Form definition for the eligibility verification flow.\n\"\"\"\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom benefits.core import models, recaptcha, widgets\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass EligibilityVerifierSelectionForm(forms.Form):\n \"\"\"Form to capture eligibility verifier selection.\"\"\"\n\n action_url = \"eligibility:index\"\n id = \"form-verifier-selection\"\n method = \"POST\"\n\n verifier = forms.ChoiceField(label=\"\", widget=widgets.VerifierRadioSelect)\n # sets label to empty string so the radio_select template can override the label style\n submit_value = _(\"eligibility.buttons.choose\")\n\n def __init__(self, agency: models.TransitAgency, *args, **kwargs):\n super().__init__(*args, **kwargs)\n verifiers = agency.eligibility_verifiers.all()\n\n self.classes = \"offset-lg-1 col-lg-9\"\n # second element is not used since we render the whole label using selection_label_template,\n # therefore set to None\n self.fields[\"verifier\"].choices = [(v.id, None) for v in verifiers]\n self.fields[\"verifier\"].widget.selection_label_templates = {v.id: v.selection_label_template for v in verifiers}\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n\n\nclass EligibilityVerificationForm(forms.Form):\n \"\"\"Form to collect eligibility verification details.\"\"\"\n\n action_url = \"eligibility:confirm\"\n id = \"form-eligibility-verification\"\n method = \"POST\"\n\n submit_value = _(\"eligibility.forms.confirm.submit\")\n submitting_value = _(\"eligibility.forms.confirm.submitting\")\n\n _error_messages = {\n \"invalid\": _(\"eligibility.forms.confirm.errors.invalid\"),\n \"missing\": _(\"eligibility.forms.confirm.errors.missing\"),\n }\n\n def __init__(self, verifier: models.EligibilityVerifier, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.classes = \"offset-lg-3 col-lg-6\"\n sub_widget = widgets.FormControlTextInput(placeholder=verifier.form_sub_placeholder)\n if verifier.form_sub_pattern:\n sub_widget.attrs.update({\"pattern\": verifier.form_sub_pattern})\n if verifier.form_input_mode:\n sub_widget.attrs.update({\"inputmode\": verifier.form_input_mode})\n if verifier.form_max_length:\n sub_widget.attrs.update({\"maxlength\": verifier.form_max_length})\n\n self.fields[\"sub\"] = forms.CharField(\n 
label=_(verifier.form_sub_label),\n widget=sub_widget,\n help_text=_(verifier.form_sub_help_text),\n )\n\n name_widget = widgets.FormControlTextInput(placeholder=verifier.form_name_placeholder)\n if verifier.form_name_max_length:\n name_widget.attrs.update({\"maxlength\": verifier.form_name_max_length})\n\n self.fields[\"name\"] = forms.CharField(\n label=_(verifier.form_name_label), widget=name_widget, help_text=_(verifier.form_name_help_text)\n )\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n", "path": "benefits/eligibility/forms.py"}]} | 1,406 | 259 |
gh_patches_debug_30699 | rasdani/github-patches | git_diff | plotly__dash-490 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow excluding file patterns when generating component definitions
Thanks a lot for making Dash -- we have found it very useful for building dashboards.
We ran into an issue when migrating to the new `dash-generate-components` utility.
In [dash-bootstrap-components](https://github.com/ASIDataScience/dash-bootstrap-components), we keep the components in `src/components`. Besides component files, this directory also includes a few unit test files such as `src/components/__tests__/DropdownMenu.test.js`. When we run `dash-generate-components ./src/components <output>`, these test files also get picked up, which leads to a traceback in the build logs:
```
Error with path src/components/__tests__/DropdownMenu.test.jsError: No suitable component definition found.
Error: No suitable component definition found.
at parse (/project/pascal/dash-bootstrap-components/node_modules/react-docgen/dist/parse.js:84:9)
at Object.defaultParse [as parse] (/project/pascal/dash-bootstrap-components/node_modules/react-docgen/dist/main.js:66:30)
at parseFile (/opt/anaconda/envs/Python3/lib/python3.6/site-packages/dash/extract-meta.js:64:51)
at dirs.forEach.filename (/opt/anaconda/envs/Python3/lib/python3.6/site-packages/dash/extract-meta.js:84:17)
at Array.forEach (<anonymous>)
at collectMetadataRecursively (/opt/anaconda/envs/Python3/lib/python3.6/site-packages/dash/extract-meta.js:79:14)
at dirs.forEach.filename (/opt/anaconda/envs/Python3/lib/python3.6/site-packages/dash/extract-meta.js:82:17)
at Array.forEach (<anonymous>)
at collectMetadataRecursively (/opt/anaconda/envs/Python3/lib/python3.6/site-packages/dash/extract-meta.js:79:14)
at componentPaths.forEach.componentPath (/opt/anaconda/envs/Python3/lib/python3.6/site-packages/dash/extract-meta.js:15:5)
```
While the error is, of course, legitimate, it would be nice to be able to suppress these tracebacks. I can see three routes:
- do nothing -- after all, this doesn't stop us from building `metadata.json`; it just makes the build logs slightly more confusing.
- in `dash/extract-meta.js`, we could explicitly blacklist `__tests__` directories in the same way that files that don't end in `jsx?` are black-listed. AFAICT, the `__tests__` directory structure is the default with [jest](https://jestjs.io/docs/en/configuration.html#testmatch-array-string). [react-docgen](https://github.com/reactjs/react-docgen#cli) ignores `node_modules`, `__tests__` and `__mocks__` by default, so there is definitely a precedent.
- add a `--ignore` argument to `dash-generate-components` that allows passing file globs to be excluded (a rough sketch of this option follows below).
Very happy to submit a PR if you decide on what the best course of action is.
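
For the third route, a rough sketch of what the flag could look like on the Python side -- the flag name and default value here are only suggestions, not an existing API:

```python
import argparse

parser = argparse.ArgumentParser(prog='dash-generate-components')
# Hypothetical flag: the pattern would be forwarded to extract-meta.js,
# which would then skip any file or directory matching it.
parser.add_argument(
    '-i', '--ignore',
    default='__tests__|__mocks__',
    help='Regex; files/directories matching the pattern are ignored.'
)
args = parser.parse_args(['--ignore', '__tests__'])
print(args.ignore)  # prints: __tests__
```

The suggested default mirrors what react-docgen already ignores, so existing projects would keep working without passing anything.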
</issue>
<code>
[start of dash/development/component_generator.py]
1 from __future__ import print_function
2
3 import json
4 import sys
5 import subprocess
6 import shlex
7 import os
8 import argparse
9 import shutil
10
11 import pkg_resources
12
13 from ._py_components_generation import generate_class_file
14 from ._py_components_generation import generate_imports
15 from ._py_components_generation import generate_classes_files
16
17
18 class _CombinedFormatter(argparse.ArgumentDefaultsHelpFormatter,
19 argparse.RawDescriptionHelpFormatter):
20 pass
21
22
23 # pylint: disable=too-many-locals
24 def generate_components(components_source, project_shortname,
25 package_info_filename='package.json'):
26 is_windows = sys.platform == 'win32'
27
28 extract_path = pkg_resources.resource_filename('dash', 'extract-meta.js')
29
30 os.environ['NODE_PATH'] = 'node_modules'
31 cmd = shlex.split('node {} {}'.format(extract_path, components_source),
32 posix=not is_windows)
33
34 shutil.copyfile('package.json',
35 os.path.join(project_shortname, package_info_filename))
36
37 proc = subprocess.Popen(cmd,
38 stdout=subprocess.PIPE,
39 stderr=subprocess.PIPE,
40 shell=is_windows)
41 out, err = proc.communicate()
42 status = proc.poll()
43
44 if err:
45 print(err.decode(), file=sys.stderr)
46
47 if not out:
48 print(
49 'Error generating metadata in {} (status={})'.format(
50 project_shortname, status),
51 file=sys.stderr)
52 sys.exit(1)
53
54 metadata = json.loads(out.decode())
55
56 components = generate_classes_files(
57 project_shortname,
58 metadata,
59 generate_class_file
60 )
61
62 with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f:
63 json.dump(metadata, f)
64
65 generate_imports(project_shortname, components)
66
67
68 def cli():
69 parser = argparse.ArgumentParser(
70 prog='dash-generate-components',
71 formatter_class=_CombinedFormatter,
72 description='Generate dash components by extracting the metadata '
73 'using react-docgen. Then map the metadata to python classes.'
74 )
75 parser.add_argument('components_source',
76 help='React components source directory.')
77 parser.add_argument(
78 'project_shortname',
79 help='Name of the project to export the classes files.'
80 )
81 parser.add_argument(
82 '-p', '--package-info-filename',
83 default='package.json',
84 help='The filename of the copied `package.json` to `project_shortname`'
85 )
86
87 args = parser.parse_args()
88 generate_components(args.components_source, args.project_shortname,
89 package_info_filename=args.package_info_filename)
90
91
92 if __name__ == '__main__':
93 cli()
94
[end of dash/development/component_generator.py]
[start of dash/version.py]
1 __version__ = '0.33.0'
2
[end of dash/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dash/development/component_generator.py b/dash/development/component_generator.py
--- a/dash/development/component_generator.py
+++ b/dash/development/component_generator.py
@@ -22,14 +22,17 @@
# pylint: disable=too-many-locals
def generate_components(components_source, project_shortname,
- package_info_filename='package.json'):
+ package_info_filename='package.json',
+ ignore='^_'):
is_windows = sys.platform == 'win32'
extract_path = pkg_resources.resource_filename('dash', 'extract-meta.js')
os.environ['NODE_PATH'] = 'node_modules'
- cmd = shlex.split('node {} {}'.format(extract_path, components_source),
- posix=not is_windows)
+ cmd = shlex.split(
+ 'node {} {} {}'.format(extract_path, ignore, components_source),
+ posix=not is_windows
+ )
shutil.copyfile('package.json',
os.path.join(project_shortname, package_info_filename))
@@ -83,10 +86,18 @@
default='package.json',
help='The filename of the copied `package.json` to `project_shortname`'
)
+ parser.add_argument(
+ '-i', '--ignore',
+ default='^_',
+ help='Files/directories matching the pattern will be ignored'
+ )
args = parser.parse_args()
- generate_components(args.components_source, args.project_shortname,
- package_info_filename=args.package_info_filename)
+ generate_components(
+ args.components_source, args.project_shortname,
+ package_info_filename=args.package_info_filename,
+ ignore=args.ignore
+ )
if __name__ == '__main__':
diff --git a/dash/version.py b/dash/version.py
--- a/dash/version.py
+++ b/dash/version.py
@@ -1 +1 @@
-__version__ = '0.33.0'
+__version__ = '0.34.0'
| {"golden_diff": "diff --git a/dash/development/component_generator.py b/dash/development/component_generator.py\n--- a/dash/development/component_generator.py\n+++ b/dash/development/component_generator.py\n@@ -22,14 +22,17 @@\n \n # pylint: disable=too-many-locals\n def generate_components(components_source, project_shortname,\n- package_info_filename='package.json'):\n+ package_info_filename='package.json',\n+ ignore='^_'):\n is_windows = sys.platform == 'win32'\n \n extract_path = pkg_resources.resource_filename('dash', 'extract-meta.js')\n \n os.environ['NODE_PATH'] = 'node_modules'\n- cmd = shlex.split('node {} {}'.format(extract_path, components_source),\n- posix=not is_windows)\n+ cmd = shlex.split(\n+ 'node {} {} {}'.format(extract_path, ignore, components_source),\n+ posix=not is_windows\n+ )\n \n shutil.copyfile('package.json',\n os.path.join(project_shortname, package_info_filename))\n@@ -83,10 +86,18 @@\n default='package.json',\n help='The filename of the copied `package.json` to `project_shortname`'\n )\n+ parser.add_argument(\n+ '-i', '--ignore',\n+ default='^_',\n+ help='Files/directories matching the pattern will be ignored'\n+ )\n \n args = parser.parse_args()\n- generate_components(args.components_source, args.project_shortname,\n- package_info_filename=args.package_info_filename)\n+ generate_components(\n+ args.components_source, args.project_shortname,\n+ package_info_filename=args.package_info_filename,\n+ ignore=args.ignore\n+ )\n \n \n if __name__ == '__main__':\ndiff --git a/dash/version.py b/dash/version.py\n--- a/dash/version.py\n+++ b/dash/version.py\n@@ -1 +1 @@\n-__version__ = '0.33.0'\n+__version__ = '0.34.0'\n", "issue": "Allow excluding file patterns when generating component definitions \nThanks a lot for making Dash -- we have found it very useful for building dashboards.\r\n\r\nWe ran into an issue migrating to the new `dash-generate-components` utility.\r\n\r\nIn [dash-bootstrap-components](https://github.com/ASIDataScience/dash-bootstrap-components), we keep the components in `src/components`. Besides components files, this also includes a few unit test files called, e.g. `src/components/__tests__/DropdownMenu.test.js`. When we run `dash-generate-components ./src/components <output>`, these test files also get picked up. 
This leads to a traceback in the build logs: \r\n\r\n```\r\nError with path src/components/__tests__/DropdownMenu.test.jsError: No suitable component definition found.\r\nError: No suitable component definition found.\r\nat parse (/project/pascal/dash-bootstrap-components/node_modules/react-docgen/dist/parse.js:84:9)\r\nat Object.defaultParse [as parse] (/project/pascal/dash-bootstrap-components/node_modules/react-docgen/dist/main.js:66:30)\r\nat parseFile (/opt/anaconda/envs/Python3/lib/python3.6/site-packages/dash/extract-meta.js:64:51)\r\nat dirs.forEach.filename (/opt/anaconda/envs/Python3/lib/python3.6/site-packages/dash/extract-meta.js:84:17)\r\nat Array.forEach (<anonymous>)\r\nat collectMetadataRecursively (/opt/anaconda/envs/Python3/lib/python3.6/site-packages/dash/extract-meta.js:79:14)\r\nat dirs.forEach.filename (/opt/anaconda/envs/Python3/lib/python3.6/site-packages/dash/extract-meta.js:82:17)\r\nat Array.forEach (<anonymous>)\r\nat collectMetadataRecursively (/opt/anaconda/envs/Python3/lib/python3.6/site-packages/dash/extract-meta.js:79:14)\r\nat componentPaths.forEach.componentPath (/opt/anaconda/envs/Python3/lib/python3.6/site-packages/dash/extract-meta.js:15:5)\r\n```\r\n\r\nWhile the error is, of course, legitimate, it would be nice to be able to suppress these tracebacks. I can see three routes:\r\n- do nothing -- after all, this doesn't stop us from building `metadata.json`, it just makes it slightly more confusing.\r\n- in `dash/extract-meta.js`, we could explicitly blacklist `__tests__` directories in the same way that files that don't end in `jsx?` are black-listed. AFAICT, the `__tests__` directory structure is the default with [jest](https://jestjs.io/docs/en/configuration.html#testmatch-array-string). [react-docgen](https://github.com/reactjs/react-docgen#cli) ignores `node_modules`, `__tests__` and `__mocks__` by default, so there is definitely a precedent.\r\n- add a `--ignore` argument to `dash-generate-components` that allows passing file globs to be excluded. 
\r\n\r\nVery happy to submit a PR if you decide on what the best course of action is.\n", "before_files": [{"content": "from __future__ import print_function\n\nimport json\nimport sys\nimport subprocess\nimport shlex\nimport os\nimport argparse\nimport shutil\n\nimport pkg_resources\n\nfrom ._py_components_generation import generate_class_file\nfrom ._py_components_generation import generate_imports\nfrom ._py_components_generation import generate_classes_files\n\n\nclass _CombinedFormatter(argparse.ArgumentDefaultsHelpFormatter,\n argparse.RawDescriptionHelpFormatter):\n pass\n\n\n# pylint: disable=too-many-locals\ndef generate_components(components_source, project_shortname,\n package_info_filename='package.json'):\n is_windows = sys.platform == 'win32'\n\n extract_path = pkg_resources.resource_filename('dash', 'extract-meta.js')\n\n os.environ['NODE_PATH'] = 'node_modules'\n cmd = shlex.split('node {} {}'.format(extract_path, components_source),\n posix=not is_windows)\n\n shutil.copyfile('package.json',\n os.path.join(project_shortname, package_info_filename))\n\n proc = subprocess.Popen(cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=is_windows)\n out, err = proc.communicate()\n status = proc.poll()\n\n if err:\n print(err.decode(), file=sys.stderr)\n\n if not out:\n print(\n 'Error generating metadata in {} (status={})'.format(\n project_shortname, status),\n file=sys.stderr)\n sys.exit(1)\n\n metadata = json.loads(out.decode())\n\n components = generate_classes_files(\n project_shortname,\n metadata,\n generate_class_file\n )\n\n with open(os.path.join(project_shortname, 'metadata.json'), 'w') as f:\n json.dump(metadata, f)\n\n generate_imports(project_shortname, components)\n\n\ndef cli():\n parser = argparse.ArgumentParser(\n prog='dash-generate-components',\n formatter_class=_CombinedFormatter,\n description='Generate dash components by extracting the metadata '\n 'using react-docgen. Then map the metadata to python classes.'\n )\n parser.add_argument('components_source',\n help='React components source directory.')\n parser.add_argument(\n 'project_shortname',\n help='Name of the project to export the classes files.'\n )\n parser.add_argument(\n '-p', '--package-info-filename',\n default='package.json',\n help='The filename of the copied `package.json` to `project_shortname`'\n )\n\n args = parser.parse_args()\n generate_components(args.components_source, args.project_shortname,\n package_info_filename=args.package_info_filename)\n\n\nif __name__ == '__main__':\n cli()\n", "path": "dash/development/component_generator.py"}, {"content": "__version__ = '0.33.0'\n", "path": "dash/version.py"}]} | 1,954 | 442 |
gh_patches_debug_22410 | rasdani/github-patches | git_diff | ESMCI__cime-2298 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
query_config --component not working for E3SM
output from scripts_regression_tests:
```
======================================================================
ERROR: test_query_components (__main__.X_TestQueryConfig)
----------------------------------------------------------------------
Traceback (most recent call last):
File "./scripts_regression_tests.py", line 1452, in test_query_components
run_cmd_no_fail("{}/query_config --components".format(SCRIPT_DIR))
File "/lcrc/group/earthscience/jacob/cimeESMCI2/scripts/tests/../lib/CIME/utils.py", line 424, in run_cmd_no_fail
expect(False, "Command: '{}' failed with error '{}' from dir '{}'".format(cmd, errput.encode('utf-8'), os.getcwd() if from_dir is None else from_dir))
File "/lcrc/group/earthscience/jacob/cimeESMCI2/scripts/tests/../lib/CIME/utils.py", line 112, in expect
raise exc_type(msg)
SystemExit: ERROR: Command: '/lcrc/group/earthscience/jacob/cimeESMCI2/scripts/query_config --components' failed with error 'ERROR: Expected one child' from dir '/lcrc/group/earthscience/jacob/cimeESMCI2/scripts/tests'
```
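
The failing call appears to be `Files.get_components` in `files.py`, which uses `get_child` and therefore raises when the `entry` node is missing. A rough, untested sketch of a more tolerant version, reusing the `get_optional_child` helper the class already uses in `get_schema`:

```python
def get_components(self, nodename):
    # Tolerate config_files.xml entries that are absent for this model.
    node = self.get_optional_child("entry", {"id": nodename})
    if node is None:
        return None
    valnodes = self.get_children("value", root=self.get_child("values", root=node))
    return [self.get(valnode, "component") for valnode in valnodes]
```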
</issue>
<code>
[start of scripts/lib/CIME/XML/files.py]
1 """
2 Interface to the config_files.xml file. This class inherits from EntryID.py
3 """
4 import re
5 from CIME.XML.standard_module_setup import *
6
7 from CIME.XML.entry_id import EntryID
8 from CIME.utils import expect, get_cime_root, get_model
9
10 logger = logging.getLogger(__name__)
11
12 class Files(EntryID):
13
14 def __init__(self):
15 """
16 initialize an object
17
18 >>> files = Files()
19 >>> files.get_value('CASEFILE_HEADERS',resolved=False)
20 '$CIMEROOT/config/config_headers.xml'
21 """
22 cimeroot = get_cime_root()
23 infile = os.path.join(cimeroot, "config", get_model(), "config_files.xml")
24 expect(os.path.isfile(infile), "Could not find or open file {}".format(infile))
25 schema = os.path.join(cimeroot, "config", "xml_schemas", "entry_id.xsd")
26 EntryID.__init__(self, infile, schema=schema)
27 config_files_override = os.path.join(os.path.dirname(cimeroot),".config_files.xml")
28 # variables COMP_ROOT_DIR_{} are mutable, all other variables are read only
29 self.COMP_ROOT_DIR = {}
30
31 # .config_file.xml at the top level may overwrite COMP_ROOT_DIR_ nodes in config_files
32
33 if os.path.isfile(config_files_override):
34 self.read(config_files_override)
35 self.overwrite_existing_entries()
36
37 def get_value(self, vid, attribute=None, resolved=True, subgroup=None):
38 if "COMP_ROOT_DIR" in vid:
39 if vid in self.COMP_ROOT_DIR:
40 if attribute is not None:
41 if vid+attribute["component"] in self.COMP_ROOT_DIR:
42 return self.COMP_ROOT_DIR[vid+attribute["component"]]
43 else:
44 return self.COMP_ROOT_DIR[vid]
45
46 value = super(Files, self).get_value(vid, attribute=attribute, resolved=False, subgroup=subgroup)
47 if value is None and attribute is not None:
48 value = super(Files, self).get_value(vid, attribute=None, resolved=False, subgroup=subgroup)
49
50 if "COMP_ROOT_DIR" not in vid and value is not None and "COMP_ROOT_DIR" in value:
51 m = re.search("(COMP_ROOT_DIR_[^/]+)/", value)
52 comp_root_dir_var_name = m.group(1)
53 comp_root_dir = self.get_value(comp_root_dir_var_name, attribute=attribute, resolved=False, subgroup=subgroup)
54 self.set_value(comp_root_dir_var_name, comp_root_dir,subgroup=attribute)
55 if resolved:
56 value = value.replace("$"+comp_root_dir_var_name, comp_root_dir)
57
58
59 if resolved and value is not None:
60 value = self.get_resolved_value(value)
61
62 return value
63
64 def set_value(self, vid, value,subgroup=None,ignore_type=False):
65 if "COMP_ROOT_DIR" in vid:
66 if subgroup is not None:
67 self.COMP_ROOT_DIR[vid+subgroup["component"]] = value
68 else:
69 self.COMP_ROOT_DIR[vid] = value
70
71 else:
72 expect(False, "Attempt to set a nonmutable variable {}".format(vid))
73 return value
74
75
76 def get_schema(self, nodename, attributes=None):
77 node = self.get_optional_child("entry", {"id":nodename})
78 schemanode = self.get_optional_child("schema", root=node, attributes=attributes)
79 if schemanode is not None:
80 logger.debug("Found schema for {}".format(nodename))
81 return self.get_resolved_value(self.text(schemanode))
82 return None
83
84 def get_components(self, nodename):
85 node = self.get_child("entry", {"id":nodename})
86 valnodes = self.get_children("value", root=self.get_child("values", root=node))
87 values = []
88 for valnode in valnodes:
89 value = self.get(valnode, "component")
90 values.append(value)
91 return values
92
[end of scripts/lib/CIME/XML/files.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/lib/CIME/XML/files.py b/scripts/lib/CIME/XML/files.py
--- a/scripts/lib/CIME/XML/files.py
+++ b/scripts/lib/CIME/XML/files.py
@@ -67,7 +67,7 @@
self.COMP_ROOT_DIR[vid+subgroup["component"]] = value
else:
self.COMP_ROOT_DIR[vid] = value
-
+
else:
expect(False, "Attempt to set a nonmutable variable {}".format(vid))
return value
@@ -82,10 +82,13 @@
return None
def get_components(self, nodename):
- node = self.get_child("entry", {"id":nodename})
- valnodes = self.get_children("value", root=self.get_child("values", root=node))
- values = []
- for valnode in valnodes:
- value = self.get(valnode, "component")
- values.append(value)
- return values
+ node = self.get_optional_child("entry", {"id":nodename})
+ if node is not None:
+ valnodes = self.get_children("value", root=self.get_child("values", root=node))
+ values = []
+ for valnode in valnodes:
+ value = self.get(valnode, "component")
+ values.append(value)
+ return values
+
+ return None
| {"golden_diff": "diff --git a/scripts/lib/CIME/XML/files.py b/scripts/lib/CIME/XML/files.py\n--- a/scripts/lib/CIME/XML/files.py\n+++ b/scripts/lib/CIME/XML/files.py\n@@ -67,7 +67,7 @@\n self.COMP_ROOT_DIR[vid+subgroup[\"component\"]] = value\n else:\n self.COMP_ROOT_DIR[vid] = value\n- \n+\n else:\n expect(False, \"Attempt to set a nonmutable variable {}\".format(vid))\n return value\n@@ -82,10 +82,13 @@\n return None\n \n def get_components(self, nodename):\n- node = self.get_child(\"entry\", {\"id\":nodename})\n- valnodes = self.get_children(\"value\", root=self.get_child(\"values\", root=node))\n- values = []\n- for valnode in valnodes:\n- value = self.get(valnode, \"component\")\n- values.append(value)\n- return values\n+ node = self.get_optional_child(\"entry\", {\"id\":nodename})\n+ if node is not None:\n+ valnodes = self.get_children(\"value\", root=self.get_child(\"values\", root=node))\n+ values = []\n+ for valnode in valnodes:\n+ value = self.get(valnode, \"component\")\n+ values.append(value)\n+ return values\n+\n+ return None\n", "issue": "query_config --component not working for E3SM\noutput from scripts_regression_tests:\r\n```\r\n======================================================================\r\nERROR: test_query_components (__main__.X_TestQueryConfig)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"./scripts_regression_tests.py\", line 1452, in test_query_components\r\n run_cmd_no_fail(\"{}/query_config --components\".format(SCRIPT_DIR))\r\n File \"/lcrc/group/earthscience/jacob/cimeESMCI2/scripts/tests/../lib/CIME/utils.py\", line 424, in run_cmd_no_fail\r\n expect(False, \"Command: '{}' failed with error '{}' from dir '{}'\".format(cmd, errput.encode('utf-8'), os.getcwd() if from_dir is None else from_dir))\r\n File \"/lcrc/group/earthscience/jacob/cimeESMCI2/scripts/tests/../lib/CIME/utils.py\", line 112, in expect\r\n raise exc_type(msg)\r\nSystemExit: ERROR: Command: '/lcrc/group/earthscience/jacob/cimeESMCI2/scripts/query_config --components' failed with error 'ERROR: Expected one child' from dir '/lcrc/group/earthscience/jacob/cimeESMCI2/scripts/tests'\r\n```\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nInterface to the config_files.xml file. 
This class inherits from EntryID.py\n\"\"\"\nimport re\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.XML.entry_id import EntryID\nfrom CIME.utils import expect, get_cime_root, get_model\n\nlogger = logging.getLogger(__name__)\n\nclass Files(EntryID):\n\n def __init__(self):\n \"\"\"\n initialize an object\n\n >>> files = Files()\n >>> files.get_value('CASEFILE_HEADERS',resolved=False)\n '$CIMEROOT/config/config_headers.xml'\n \"\"\"\n cimeroot = get_cime_root()\n infile = os.path.join(cimeroot, \"config\", get_model(), \"config_files.xml\")\n expect(os.path.isfile(infile), \"Could not find or open file {}\".format(infile))\n schema = os.path.join(cimeroot, \"config\", \"xml_schemas\", \"entry_id.xsd\")\n EntryID.__init__(self, infile, schema=schema)\n config_files_override = os.path.join(os.path.dirname(cimeroot),\".config_files.xml\")\n # variables COMP_ROOT_DIR_{} are mutable, all other variables are read only\n self.COMP_ROOT_DIR = {}\n\n # .config_file.xml at the top level may overwrite COMP_ROOT_DIR_ nodes in config_files\n\n if os.path.isfile(config_files_override):\n self.read(config_files_override)\n self.overwrite_existing_entries()\n\n def get_value(self, vid, attribute=None, resolved=True, subgroup=None):\n if \"COMP_ROOT_DIR\" in vid:\n if vid in self.COMP_ROOT_DIR:\n if attribute is not None:\n if vid+attribute[\"component\"] in self.COMP_ROOT_DIR:\n return self.COMP_ROOT_DIR[vid+attribute[\"component\"]]\n else:\n return self.COMP_ROOT_DIR[vid]\n\n value = super(Files, self).get_value(vid, attribute=attribute, resolved=False, subgroup=subgroup)\n if value is None and attribute is not None:\n value = super(Files, self).get_value(vid, attribute=None, resolved=False, subgroup=subgroup)\n\n if \"COMP_ROOT_DIR\" not in vid and value is not None and \"COMP_ROOT_DIR\" in value:\n m = re.search(\"(COMP_ROOT_DIR_[^/]+)/\", value)\n comp_root_dir_var_name = m.group(1)\n comp_root_dir = self.get_value(comp_root_dir_var_name, attribute=attribute, resolved=False, subgroup=subgroup)\n self.set_value(comp_root_dir_var_name, comp_root_dir,subgroup=attribute)\n if resolved:\n value = value.replace(\"$\"+comp_root_dir_var_name, comp_root_dir)\n\n\n if resolved and value is not None:\n value = self.get_resolved_value(value)\n\n return value\n\n def set_value(self, vid, value,subgroup=None,ignore_type=False):\n if \"COMP_ROOT_DIR\" in vid:\n if subgroup is not None:\n self.COMP_ROOT_DIR[vid+subgroup[\"component\"]] = value\n else:\n self.COMP_ROOT_DIR[vid] = value\n \n else:\n expect(False, \"Attempt to set a nonmutable variable {}\".format(vid))\n return value\n\n\n def get_schema(self, nodename, attributes=None):\n node = self.get_optional_child(\"entry\", {\"id\":nodename})\n schemanode = self.get_optional_child(\"schema\", root=node, attributes=attributes)\n if schemanode is not None:\n logger.debug(\"Found schema for {}\".format(nodename))\n return self.get_resolved_value(self.text(schemanode))\n return None\n\n def get_components(self, nodename):\n node = self.get_child(\"entry\", {\"id\":nodename})\n valnodes = self.get_children(\"value\", root=self.get_child(\"values\", root=node))\n values = []\n for valnode in valnodes:\n value = self.get(valnode, \"component\")\n values.append(value)\n return values\n", "path": "scripts/lib/CIME/XML/files.py"}]} | 1,820 | 300 |
gh_patches_debug_11870 | rasdani/github-patches | git_diff | vllm-project__vllm-5077 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove EOS token before passing the tokenized input to model
How can I remove the EOS token id before passing the input tokens to the model? I'm trying this with a fine-tuned Mistral model. Because there is an EOS token id at the end of the sentence, the model generates results for a different input that is similar to the original input.
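
For now I work around it by stripping a trailing EOS id from the token list myself before calling generate. A rough sketch, assuming it is the tokenizer that appends the EOS (the model path is a placeholder):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/fine-tuned-mistral")  # placeholder
prompt = "some input sentence"
ids = tokenizer(prompt).input_ids
if ids and ids[-1] == tokenizer.eos_token_id:
    ids = ids[:-1]  # drop the trailing EOS before passing the tokens to the model
```

It would be nicer if there were a supported option for this instead of pre-processing the ids by hand.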
</issue>
<code>
[start of vllm/engine/output_processor/stop_checker.py]
1 from typing import Callable, Optional
2
3 from transformers import PreTrainedTokenizer
4
5 from vllm.lora.request import LoRARequest
6 from vllm.sampling_params import SamplingParams
7 from vllm.sequence import Sequence, SequenceStatus
8
9
10 class StopChecker:
11 """LLMEngine helper class which separates out the logic involving stop
12 checking. This checks things such as: whether the eos token was emitted,
13 whether the max_tokens has been consumed, whether a stop string has been
14 emitted, or if we have exceeded the max model len.
15 """
16
17 def __init__(self, max_model_len: int,
18 get_tokenizer_for_seq: Callable[[Sequence],
19 PreTrainedTokenizer]):
20 # Do not use it directly, but use `self._get_max_model_len`.
21 self._max_model_len = max_model_len
22 self.get_tokenizer_for_seq = get_tokenizer_for_seq
23
24 def _get_max_model_len(self, lora_req: Optional[LoRARequest]):
25 if lora_req and lora_req.long_lora_max_len:
26 return lora_req.long_lora_max_len
27 else:
28 return self._max_model_len
29
30 def maybe_stop_sequence(
31 self,
32 seq: Sequence,
33 new_char_count: int,
34 sampling_params: SamplingParams,
35 lora_req: Optional[LoRARequest] = None,
36 ) -> None:
37 """Stop the finished sequences.
38
39 new_char_count is the number of chars added to the
40 sequence's output text for the newly generated token
41 """
42
43 # Check if the minimum number of tokens has been generated yet;
44 # skip the stop string/token checks if not
45 if seq.get_output_len() < sampling_params.min_tokens:
46 return
47
48 # Check if the sequence has generated the EOS token.
49 if ((not sampling_params.ignore_eos)
50 and seq.get_last_token_id() == seq.eos_token_id):
51 seq.status = SequenceStatus.FINISHED_STOPPED
52 return
53
54 # Check if a stop token was encountered.
55 # This assumes a single token produced per step.
56 last_token_id = seq.get_last_token_id()
57 if last_token_id in sampling_params.stop_token_ids:
58 if new_char_count and (
59 not sampling_params.include_stop_str_in_output):
60 # Remove last token
61 seq.output_text = seq.output_text[:-new_char_count]
62 seq.status = SequenceStatus.FINISHED_STOPPED
63 seq.stop_reason = last_token_id
64 return
65
66 # Check if any stop strings are matched.
67 stop_str = self._check_stop_strings(seq, new_char_count,
68 sampling_params)
69 if stop_str is not None:
70 seq.status = SequenceStatus.FINISHED_STOPPED
71 seq.stop_reason = stop_str
72 return
73
74 # Check if the sequence has reached max_model_len.
75 if seq.get_len() > self._get_max_model_len(lora_req):
76 seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED
77 return
78
79 # Check if the sequence has reached max_tokens.
80 if seq.get_output_len() == sampling_params.max_tokens:
81 seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED
82 return
83
84 @staticmethod
85 def _check_stop_strings(seq: Sequence, new_char_count: int,
86 sampling_params: SamplingParams) -> Optional[str]:
87 """Check if any stop strings are matched and truncate sequence
88 output text accordingly.
89
90 Returns the stop string if matched or else None.
91 """
92 if not new_char_count:
93 return None
94
95 for stop_str in sampling_params.stop:
96 stop_string_len = len(stop_str)
97 # Avoid searching already-searched text.
98 stop_index = seq.output_text.find(
99 stop_str, -new_char_count - stop_string_len)
100 if stop_index == -1:
101 continue
102
103 if sampling_params.include_stop_str_in_output:
104 # Truncate to end of stop string.
105 stop_index += stop_string_len
106 if stop_index >= len(seq.output_text):
107 # No truncation required.
108 return stop_str
109
110 # Truncate the output text to either the beginning
111 # or end of the stop string.
112 seq.output_text = seq.output_text[:stop_index]
113 return stop_str
114 return None
115
[end of vllm/engine/output_processor/stop_checker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vllm/engine/output_processor/stop_checker.py b/vllm/engine/output_processor/stop_checker.py
--- a/vllm/engine/output_processor/stop_checker.py
+++ b/vllm/engine/output_processor/stop_checker.py
@@ -48,6 +48,11 @@
# Check if the sequence has generated the EOS token.
if ((not sampling_params.ignore_eos)
and seq.get_last_token_id() == seq.eos_token_id):
+ # Remove the last EOS token unless explicitly specified
+ # This prevents unintended exposure of the EOS token
+ if new_char_count and (
+ not sampling_params.include_stop_str_in_output):
+ seq.output_text = seq.output_text[:-new_char_count]
seq.status = SequenceStatus.FINISHED_STOPPED
return
| {"golden_diff": "diff --git a/vllm/engine/output_processor/stop_checker.py b/vllm/engine/output_processor/stop_checker.py\n--- a/vllm/engine/output_processor/stop_checker.py\n+++ b/vllm/engine/output_processor/stop_checker.py\n@@ -48,6 +48,11 @@\n # Check if the sequence has generated the EOS token.\n if ((not sampling_params.ignore_eos)\n and seq.get_last_token_id() == seq.eos_token_id):\n+ # Remove the last EOS token unless explicitly specified\n+ # This prevents unintended exposure of the EOS token\n+ if new_char_count and (\n+ not sampling_params.include_stop_str_in_output):\n+ seq.output_text = seq.output_text[:-new_char_count]\n seq.status = SequenceStatus.FINISHED_STOPPED\n return\n", "issue": "Remove EOS token before passing the tokenized input to model\n\r\n\r\nHow to remove eos token id before passing the input tokens to model. I'm trying for fine-tuned mistral model. Just because there is an eos token id at the end of sentence, model generates the results for a different input which is similar to original input\n", "before_files": [{"content": "from typing import Callable, Optional\n\nfrom transformers import PreTrainedTokenizer\n\nfrom vllm.lora.request import LoRARequest\nfrom vllm.sampling_params import SamplingParams\nfrom vllm.sequence import Sequence, SequenceStatus\n\n\nclass StopChecker:\n \"\"\"LLMEngine helper class which separates out the logic involving stop\n checking. This checks things such as: whether the eos token was emitted,\n whether the max_tokens has been consumed, whether a stop string has been\n emitted, or if we have exceeded the max model len.\n \"\"\"\n\n def __init__(self, max_model_len: int,\n get_tokenizer_for_seq: Callable[[Sequence],\n PreTrainedTokenizer]):\n # Do not use it directly, but use `self._get_max_model_len`.\n self._max_model_len = max_model_len\n self.get_tokenizer_for_seq = get_tokenizer_for_seq\n\n def _get_max_model_len(self, lora_req: Optional[LoRARequest]):\n if lora_req and lora_req.long_lora_max_len:\n return lora_req.long_lora_max_len\n else:\n return self._max_model_len\n\n def maybe_stop_sequence(\n self,\n seq: Sequence,\n new_char_count: int,\n sampling_params: SamplingParams,\n lora_req: Optional[LoRARequest] = None,\n ) -> None:\n \"\"\"Stop the finished sequences.\n\n new_char_count is the number of chars added to the\n sequence's output text for the newly generated token\n \"\"\"\n\n # Check if the minimum number of tokens has been generated yet;\n # skip the stop string/token checks if not\n if seq.get_output_len() < sampling_params.min_tokens:\n return\n\n # Check if the sequence has generated the EOS token.\n if ((not sampling_params.ignore_eos)\n and seq.get_last_token_id() == seq.eos_token_id):\n seq.status = SequenceStatus.FINISHED_STOPPED\n return\n\n # Check if a stop token was encountered.\n # This assumes a single token produced per step.\n last_token_id = seq.get_last_token_id()\n if last_token_id in sampling_params.stop_token_ids:\n if new_char_count and (\n not sampling_params.include_stop_str_in_output):\n # Remove last token\n seq.output_text = seq.output_text[:-new_char_count]\n seq.status = SequenceStatus.FINISHED_STOPPED\n seq.stop_reason = last_token_id\n return\n\n # Check if any stop strings are matched.\n stop_str = self._check_stop_strings(seq, new_char_count,\n sampling_params)\n if stop_str is not None:\n seq.status = SequenceStatus.FINISHED_STOPPED\n seq.stop_reason = stop_str\n return\n\n # Check if the sequence has reached max_model_len.\n if seq.get_len() > 
self._get_max_model_len(lora_req):\n seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED\n return\n\n # Check if the sequence has reached max_tokens.\n if seq.get_output_len() == sampling_params.max_tokens:\n seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED\n return\n\n @staticmethod\n def _check_stop_strings(seq: Sequence, new_char_count: int,\n sampling_params: SamplingParams) -> Optional[str]:\n \"\"\"Check if any stop strings are matched and truncate sequence\n output text accordingly.\n\n Returns the stop string if matched or else None.\n \"\"\"\n if not new_char_count:\n return None\n\n for stop_str in sampling_params.stop:\n stop_string_len = len(stop_str)\n # Avoid searching already-searched text.\n stop_index = seq.output_text.find(\n stop_str, -new_char_count - stop_string_len)\n if stop_index == -1:\n continue\n\n if sampling_params.include_stop_str_in_output:\n # Truncate to end of stop string.\n stop_index += stop_string_len\n if stop_index >= len(seq.output_text):\n # No truncation required.\n return stop_str\n\n # Truncate the output text to either the beginning\n # or end of the stop string.\n seq.output_text = seq.output_text[:stop_index]\n return stop_str\n return None\n", "path": "vllm/engine/output_processor/stop_checker.py"}]} | 1,760 | 175 |
gh_patches_debug_32610 | rasdani/github-patches | git_diff | conan-io__conan-center-index-3830 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[package] libuuid/1.0.3: apple_clang 12 build broken by patch
The build of libuuid 1.0.3 is broken on macOS 11 / apple-clang 12 due to an applied patch:
the define `HAVE_SYS_TIME_H` does not appear to be defined, even though the include is valid.
```
+#if defined(HAVE_SYS_TIME_H)
#include <sys/time.h>
+#endif
```
This can be fixed with:
```
+#if defined(HAVE_SYS_TIME_H) || defined(__APPLE__)
#include <sys/time.h>
+#endif
```
but I guess there is a better way to ensure the define is set on Apple platforms.
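
One idea -- an untested sketch, assuming the upstream `configure.ac` already contains a check for `sys/time.h` -- would be to regenerate the configure script in the recipe so the header check actually runs and defines `HAVE_SYS_TIME_H`, instead of special-casing `__APPLE__` in the patch:

```python
def build(self):
    self._patch_sources()
    with tools.chdir(self._source_subfolder):
        # Re-run autotools so configure's header checks define
        # HAVE_SYS_TIME_H on macOS too; this needs autoconf/automake/
        # libtool to be available at build time.
        self.run("autoreconf -fiv", run_environment=True)
        autotools = self._configure_autotools()
        autotools.make()
```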
</issue>
<code>
[start of recipes/libuuid/all/conanfile.py]
1 from conans import ConanFile, AutoToolsBuildEnvironment, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os
4
5
6 class LibuuidConan(ConanFile):
7 name = "libuuid"
8 description = "Portable uuid C library"
9 url = "https://github.com/conan-io/conan-center-index"
10 homepage = "https://sourceforge.net/projects/libuuid/"
11 license = "BSD-3-Clause"
12 topics = ("conan", "libuuid", "uuid", "unique-id", "unique-identifier")
13 settings = "os", "arch", "compiler", "build_type"
14 exports_sources = "patches/**"
15 options = {"shared": [True, False], "fPIC": [True, False]}
16 default_options = {"shared": False, "fPIC": True}
17 _source_subfolder = "source_subfolder"
18 _autotools = None
19
20 def source(self):
21 tools.get(**self.conan_data["sources"][self.version])
22 os.rename(self.name + "-" + self.version, self._source_subfolder)
23
24 def _patch_sources(self):
25 for patch in self.conan_data["patches"][self.version]:
26 tools.patch(**patch)
27
28 def config_options(self):
29 if self.settings.os == 'Windows':
30 del self.options.fPIC
31
32 def configure(self):
33 if self.settings.os == "Windows":
34 raise ConanInvalidConfiguration("libuuid is not supported on Windows")
35 del self.settings.compiler.libcxx
36 del self.settings.compiler.cppstd
37
38 def _configure_autotools(self):
39 if not self._autotools:
40 configure_args = [
41 "--enable-shared=%s" % ("yes" if self.options.shared else "no"),
42 "--enable-static=%s" % ("no" if self.options.shared else "yes")
43 ]
44 self._autotools = AutoToolsBuildEnvironment(self)
45 if "x86" in self.settings.arch:
46 self._autotools.flags.append('-mstackrealign')
47 self._autotools.configure(args=configure_args)
48 return self._autotools
49
50 def build(self):
51 self._patch_sources()
52 with tools.chdir(self._source_subfolder):
53 autotools = self._configure_autotools()
54 autotools.make()
55
56 def package(self):
57 self.copy("COPYING", dst="licenses", src=self._source_subfolder)
58 with tools.chdir(self._source_subfolder):
59 autotools = self._configure_autotools()
60 autotools.install()
61 la_file = os.path.join(self.package_folder, "lib", "libuuid.la")
62 if os.path.isfile(la_file):
63 os.unlink(la_file)
64 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
65
66 def package_info(self):
67 self.cpp_info.libs = tools.collect_libs(self)
68 self.cpp_info.includedirs.append(os.path.join("include", "uuid"))
69
[end of recipes/libuuid/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/libuuid/all/conanfile.py b/recipes/libuuid/all/conanfile.py
--- a/recipes/libuuid/all/conanfile.py
+++ b/recipes/libuuid/all/conanfile.py
@@ -35,29 +35,34 @@
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
+ def build_requirements(self):
+ self.build_requires("libtool/2.4.6")
+
def _configure_autotools(self):
- if not self._autotools:
- configure_args = [
- "--enable-shared=%s" % ("yes" if self.options.shared else "no"),
- "--enable-static=%s" % ("no" if self.options.shared else "yes")
- ]
- self._autotools = AutoToolsBuildEnvironment(self)
- if "x86" in self.settings.arch:
- self._autotools.flags.append('-mstackrealign')
- self._autotools.configure(args=configure_args)
+ if self._autotools:
+ return self._autotools
+ self._autotools = AutoToolsBuildEnvironment(self)
+ yes_no = lambda v: "yes" if v else "no"
+ configure_args = [
+ "--enable-shared={}".format(yes_no(self.options.shared)),
+ "--enable-static={}".format(yes_no(not self.options.shared)),
+ ]
+ if "x86" in self.settings.arch:
+ self._autotools.flags.append('-mstackrealign')
+ self._autotools.configure(args=configure_args, configure_dir=self._source_subfolder)
return self._autotools
def build(self):
self._patch_sources()
with tools.chdir(self._source_subfolder):
- autotools = self._configure_autotools()
- autotools.make()
+ self.run("autoreconf -fiv", run_environment=True)
+ autotools = self._configure_autotools()
+ autotools.make()
def package(self):
self.copy("COPYING", dst="licenses", src=self._source_subfolder)
- with tools.chdir(self._source_subfolder):
- autotools = self._configure_autotools()
- autotools.install()
+ autotools = self._configure_autotools()
+ autotools.install()
la_file = os.path.join(self.package_folder, "lib", "libuuid.la")
if os.path.isfile(la_file):
os.unlink(la_file)
| {"golden_diff": "diff --git a/recipes/libuuid/all/conanfile.py b/recipes/libuuid/all/conanfile.py\n--- a/recipes/libuuid/all/conanfile.py\n+++ b/recipes/libuuid/all/conanfile.py\n@@ -35,29 +35,34 @@\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n \n+ def build_requirements(self):\n+ self.build_requires(\"libtool/2.4.6\")\n+\n def _configure_autotools(self):\n- if not self._autotools:\n- configure_args = [\n- \"--enable-shared=%s\" % (\"yes\" if self.options.shared else \"no\"),\n- \"--enable-static=%s\" % (\"no\" if self.options.shared else \"yes\")\n- ]\n- self._autotools = AutoToolsBuildEnvironment(self)\n- if \"x86\" in self.settings.arch:\n- self._autotools.flags.append('-mstackrealign')\n- self._autotools.configure(args=configure_args)\n+ if self._autotools:\n+ return self._autotools\n+ self._autotools = AutoToolsBuildEnvironment(self)\n+ yes_no = lambda v: \"yes\" if v else \"no\"\n+ configure_args = [\n+ \"--enable-shared={}\".format(yes_no(self.options.shared)),\n+ \"--enable-static={}\".format(yes_no(not self.options.shared)),\n+ ]\n+ if \"x86\" in self.settings.arch:\n+ self._autotools.flags.append('-mstackrealign')\n+ self._autotools.configure(args=configure_args, configure_dir=self._source_subfolder)\n return self._autotools\n \n def build(self):\n self._patch_sources()\n with tools.chdir(self._source_subfolder):\n- autotools = self._configure_autotools()\n- autotools.make()\n+ self.run(\"autoreconf -fiv\", run_environment=True)\n+ autotools = self._configure_autotools()\n+ autotools.make()\n \n def package(self):\n self.copy(\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n- with tools.chdir(self._source_subfolder):\n- autotools = self._configure_autotools()\n- autotools.install()\n+ autotools = self._configure_autotools()\n+ autotools.install()\n la_file = os.path.join(self.package_folder, \"lib\", \"libuuid.la\")\n if os.path.isfile(la_file):\n os.unlink(la_file)\n", "issue": "[package] libuuid/1.0.3: apple_clang 12 build broken by patch\nbuild of libuuid 1.0.3 is broken on macOS11 / apple-clang 12 due to applied patch\r\nthe define HAVE_SYS_TIME_H seems to be not defined but the include is valid\r\n\r\n```\r\n+#if defined(HAVE_SYS_TIME_H)\r\n #include <sys/time.h>\r\n+#endif\r\n```\r\n\r\nthis can be fixed by \r\n\r\n```\r\n+#if defined(HAVE_SYS_TIME_H) || defined(__APPLE__)\r\n #include <sys/time.h>\r\n+#endif\r\n```\r\n\r\nbut I guess there is a better way to ensure the define is set for apple platform\r\n\n", "before_files": [{"content": "from conans import ConanFile, AutoToolsBuildEnvironment, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\n\nclass LibuuidConan(ConanFile):\n name = \"libuuid\"\n description = \"Portable uuid C library\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://sourceforge.net/projects/libuuid/\"\n license = \"BSD-3-Clause\"\n topics = (\"conan\", \"libuuid\", \"uuid\", \"unique-id\", \"unique-identifier\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n exports_sources = \"patches/**\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n _source_subfolder = \"source_subfolder\"\n _autotools = None\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(self.name + \"-\" + self.version, self._source_subfolder)\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n def 
config_options(self):\n if self.settings.os == 'Windows':\n del self.options.fPIC\n\n def configure(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\"libuuid is not supported on Windows\")\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def _configure_autotools(self):\n if not self._autotools:\n configure_args = [\n \"--enable-shared=%s\" % (\"yes\" if self.options.shared else \"no\"),\n \"--enable-static=%s\" % (\"no\" if self.options.shared else \"yes\")\n ]\n self._autotools = AutoToolsBuildEnvironment(self)\n if \"x86\" in self.settings.arch:\n self._autotools.flags.append('-mstackrealign')\n self._autotools.configure(args=configure_args)\n return self._autotools\n\n def build(self):\n self._patch_sources()\n with tools.chdir(self._source_subfolder):\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n with tools.chdir(self._source_subfolder):\n autotools = self._configure_autotools()\n autotools.install()\n la_file = os.path.join(self.package_folder, \"lib\", \"libuuid.la\")\n if os.path.isfile(la_file):\n os.unlink(la_file)\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n self.cpp_info.includedirs.append(os.path.join(\"include\", \"uuid\"))\n", "path": "recipes/libuuid/all/conanfile.py"}]} | 1,447 | 559 |
gh_patches_debug_28103 | rasdani/github-patches | git_diff | bridgecrewio__checkov-39 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
security_groups in aws_security_group rule not supported
**Describe the bug**
Referencing `security_groups` instead of `cidr_blocks` in a security group rule causes an exception.
**To Reproduce**
Steps to reproduce the behavior:
1. Try to run checkov on the following resource:
```tf
resource "aws_security_group" "bar-sg" {
name = "sg-bar"
vpc_id = aws_vpc.main.id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
security_groups = [aws_security_group.foo-sg.id]
description = "foo"
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
```
result:
```
Traceback (most recent call last):
File "/path/tf-checks/bin/checkov", line 34, in <module>
report = Runner().run(root_folder, external_checks_dir=args.external_checks_dir)
File "/path/tf-checks/lib/python3.7/site-packages/checkov/terraform/runner.py", line 38, in run
results = resource_registry.scan(resource, scanned_file, skipped_checks)
File "/pathtf-checks/lib/python3.7/site-packages/checkov/terraform/checks/resource/registry.py", line 38, in scan
resource_name=resource_name, resource_type=resource, skip_info=skip_info)
File "/path/tf-checks/lib/python3.7/site-packages/checkov/terraform/checks/resource/base_check.py", line 31, in run
check_result['result'] = self.scan_resource_conf(resource_configuration)
File "/path/tf-checks/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress22.py", line 25, in scan_resource_conf
if rule['from_port'] == [PORT] and rule['to_port'] == [PORT] and rule['cidr_blocks'] == [[
KeyError: 'cidr_blocks'
```
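
The check could guard against rules that have no `cidr_blocks` key at all. A rough, untested sketch against `SecurityGroupUnrestrictedIngress22.scan_resource_conf`:

```python
for rule in ingress_conf:
    if rule['from_port'] == [PORT] and rule['to_port'] == [PORT]:
        # Rules that reference other security groups carry no cidr_blocks.
        if 'cidr_blocks' in rule.keys():
            if rule['cidr_blocks'] == [["0.0.0.0/0"]] and 'security_groups' not in rule.keys():
                return CheckResult.FAILED
```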
**Expected behavior**
Such a resource definition is perfectly valid.
**Desktop (please complete the following information):**
- OS: Ubuntu `19.10`
- Python: `3.7.5`
- Checkov Version `1.0.99`
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress3389.py]
1 from checkov.terraform.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_check import BaseResourceCheck
3
4 PORT = 3389
5
6
7 class SecurityGroupUnrestrictedIngress3389(BaseResourceCheck):
8 def __init__(self):
9 name = "Ensure no security groups allow ingress from 0.0.0.0:0 to port %d" % PORT
10 id = "CKV_AWS_25"
11 supported_resources = ['aws_security_group']
12 categories = [CheckCategories.NETWORKING]
13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
14
15 def scan_resource_conf(self, conf):
16 """
17 Looks for configuration at security group ingress rules :
18 https://www.terraform.io/docs/providers/aws/r/security_group.html
19 :param conf: aws_security_group configuration
20 :return: <CheckResult>
21 """
22 if 'ingress' in conf.keys():
23 ingress_conf = conf['ingress']
24 for rule in ingress_conf:
25 if rule['from_port'] == [PORT] and rule['to_port'] == [PORT] and rule['cidr_blocks'] == [
26 ["0.0.0.0/0"]] and 'self' not in rule.keys() and 'security_groups' not in rule.keys():
27 return CheckResult.FAILED
28
29 return CheckResult.PASSED
30
31
32 check = SecurityGroupUnrestrictedIngress3389()
33
[end of checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress3389.py]
[start of checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress22.py]
1 from checkov.terraform.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_check import BaseResourceCheck
3
4 PORT = 22
5
6
7 class SecurityGroupUnrestrictedIngress22(BaseResourceCheck):
8 def __init__(self):
9 name = "Ensure no security groups allow ingress from 0.0.0.0:0 to port %d" % PORT
10 id = "CKV_AWS_24"
11 supported_resources = ['aws_security_group']
12 categories = [CheckCategories.NETWORKING]
13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
14
15 def scan_resource_conf(self, conf):
16 """
17 Looks for configuration at security group ingress rules :
18 https://www.terraform.io/docs/providers/aws/r/security_group.html
19 :param conf: aws_security_group configuration
20 :return: <CheckResult>
21 """
22 if 'ingress' in conf.keys():
23 ingress_conf = conf['ingress']
24 for rule in ingress_conf:
25 if rule['from_port'] == [PORT] and rule['to_port'] == [PORT] and rule['cidr_blocks'] == [[
26 "0.0.0.0/0"]] and 'self' not in rule.keys() and 'security_groups' not in rule.keys():
27 return CheckResult.FAILED
28
29 return CheckResult.PASSED
30
31
32 check = SecurityGroupUnrestrictedIngress22()
33
[end of checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress22.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress22.py b/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress22.py
--- a/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress22.py
+++ b/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress22.py
@@ -22,9 +22,10 @@
if 'ingress' in conf.keys():
ingress_conf = conf['ingress']
for rule in ingress_conf:
- if rule['from_port'] == [PORT] and rule['to_port'] == [PORT] and rule['cidr_blocks'] == [[
- "0.0.0.0/0"]] and 'self' not in rule.keys() and 'security_groups' not in rule.keys():
- return CheckResult.FAILED
+ if rule['from_port'] == [PORT] and rule['to_port'] == [PORT]:
+ if 'cidr_blocks' in rule.keys():
+ if rule['cidr_blocks'] == [["0.0.0.0/0"]] and 'security_groups' not in rule.keys():
+ return CheckResult.FAILED
return CheckResult.PASSED
diff --git a/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress3389.py b/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress3389.py
--- a/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress3389.py
+++ b/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress3389.py
@@ -22,9 +22,11 @@
if 'ingress' in conf.keys():
ingress_conf = conf['ingress']
for rule in ingress_conf:
- if rule['from_port'] == [PORT] and rule['to_port'] == [PORT] and rule['cidr_blocks'] == [
- ["0.0.0.0/0"]] and 'self' not in rule.keys() and 'security_groups' not in rule.keys():
- return CheckResult.FAILED
+ if rule['from_port'] == [PORT] and rule['to_port'] == [PORT]:
+ if 'cidr_blocks' in rule.keys():
+ if rule['cidr_blocks'] == [["0.0.0.0/0"]] and 'security_groups' not in rule.keys():
+ return CheckResult.FAILED
+
return CheckResult.PASSED
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress22.py b/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress22.py\n--- a/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress22.py\n+++ b/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress22.py\n@@ -22,9 +22,10 @@\n if 'ingress' in conf.keys():\n ingress_conf = conf['ingress']\n for rule in ingress_conf:\n- if rule['from_port'] == [PORT] and rule['to_port'] == [PORT] and rule['cidr_blocks'] == [[\n- \"0.0.0.0/0\"]] and 'self' not in rule.keys() and 'security_groups' not in rule.keys():\n- return CheckResult.FAILED\n+ if rule['from_port'] == [PORT] and rule['to_port'] == [PORT]:\n+ if 'cidr_blocks' in rule.keys():\n+ if rule['cidr_blocks'] == [[\"0.0.0.0/0\"]] and 'security_groups' not in rule.keys():\n+ return CheckResult.FAILED\n \n return CheckResult.PASSED\n \ndiff --git a/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress3389.py b/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress3389.py\n--- a/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress3389.py\n+++ b/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress3389.py\n@@ -22,9 +22,11 @@\n if 'ingress' in conf.keys():\n ingress_conf = conf['ingress']\n for rule in ingress_conf:\n- if rule['from_port'] == [PORT] and rule['to_port'] == [PORT] and rule['cidr_blocks'] == [\n- [\"0.0.0.0/0\"]] and 'self' not in rule.keys() and 'security_groups' not in rule.keys():\n- return CheckResult.FAILED\n+ if rule['from_port'] == [PORT] and rule['to_port'] == [PORT]:\n+ if 'cidr_blocks' in rule.keys():\n+ if rule['cidr_blocks'] == [[\"0.0.0.0/0\"]] and 'security_groups' not in rule.keys():\n+ return CheckResult.FAILED\n+\n \n return CheckResult.PASSED\n", "issue": "security_groups in aws_security_group rule not supported\n**Describe the bug**\r\nreferencing a `security_group` instead of `cidr_block` in a security group rule causes an exception\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. 
try to run checkov on the following resource:\r\n```tf\r\nresource \"aws_security_group\" \"bar-sg\" {\r\n name = \"sg-bar\"\r\n vpc_id = aws_vpc.main.id\r\n\r\n ingress {\r\n from_port = 22\r\n to_port = 22\r\n protocol = \"tcp\"\r\n security_groups = [aws_security_group.foo-sg.id]\r\n description = \"foo\"\r\n }\r\n\r\n egress {\r\n from_port = 0\r\n to_port = 0\r\n protocol = \"-1\"\r\n cidr_blocks = [\"0.0.0.0/0\"]\r\n }\r\n\r\n}\r\n\r\n```\r\nresult:\r\n```\r\nTraceback (most recent call last):\r\n File \"/path/tf-checks/bin/checkov\", line 34, in <module>\r\n report = Runner().run(root_folder, external_checks_dir=args.external_checks_dir)\r\n File \"/path/tf-checks/lib/python3.7/site-packages/checkov/terraform/runner.py\", line 38, in run\r\n results = resource_registry.scan(resource, scanned_file, skipped_checks)\r\n File \"/pathtf-checks/lib/python3.7/site-packages/checkov/terraform/checks/resource/registry.py\", line 38, in scan\r\n resource_name=resource_name, resource_type=resource, skip_info=skip_info)\r\n File \"/path/tf-checks/lib/python3.7/site-packages/checkov/terraform/checks/resource/base_check.py\", line 31, in run\r\n check_result['result'] = self.scan_resource_conf(resource_configuration)\r\n File \"/path/tf-checks/lib/python3.7/site-packages/checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress22.py\", line 25, in scan_resource_conf\r\n if rule['from_port'] == [PORT] and rule['to_port'] == [PORT] and rule['cidr_blocks'] == [[\r\nKeyError: 'cidr_blocks'\r\n```\r\n\r\n**Expected behavior**\r\nsuch resource definition is perfectly valid\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Ubuntu `19.10`\r\n - Python: `3.7.5`\r\n - Checkov Version `1.0.99`\r\n\r\n\r\n\n", "before_files": [{"content": "from checkov.terraform.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_check import BaseResourceCheck\n\nPORT = 3389\n\n\nclass SecurityGroupUnrestrictedIngress3389(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure no security groups allow ingress from 0.0.0.0:0 to port %d\" % PORT\n id = \"CKV_AWS_25\"\n supported_resources = ['aws_security_group']\n categories = [CheckCategories.NETWORKING]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for configuration at security group ingress rules :\n https://www.terraform.io/docs/providers/aws/r/security_group.html\n :param conf: aws_security_group configuration\n :return: <CheckResult>\n \"\"\"\n if 'ingress' in conf.keys():\n ingress_conf = conf['ingress']\n for rule in ingress_conf:\n if rule['from_port'] == [PORT] and rule['to_port'] == [PORT] and rule['cidr_blocks'] == [\n [\"0.0.0.0/0\"]] and 'self' not in rule.keys() and 'security_groups' not in rule.keys():\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n\ncheck = SecurityGroupUnrestrictedIngress3389()\n", "path": "checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress3389.py"}, {"content": "from checkov.terraform.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_check import BaseResourceCheck\n\nPORT = 22\n\n\nclass SecurityGroupUnrestrictedIngress22(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure no security groups allow ingress from 0.0.0.0:0 to port %d\" % PORT\n id = \"CKV_AWS_24\"\n supported_resources = ['aws_security_group']\n categories = [CheckCategories.NETWORKING]\n 
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for configuration at security group ingress rules :\n https://www.terraform.io/docs/providers/aws/r/security_group.html\n :param conf: aws_security_group configuration\n :return: <CheckResult>\n \"\"\"\n if 'ingress' in conf.keys():\n ingress_conf = conf['ingress']\n for rule in ingress_conf:\n if rule['from_port'] == [PORT] and rule['to_port'] == [PORT] and rule['cidr_blocks'] == [[\n \"0.0.0.0/0\"]] and 'self' not in rule.keys() and 'security_groups' not in rule.keys():\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n\ncheck = SecurityGroupUnrestrictedIngress22()\n", "path": "checkov/terraform/checks/resource/aws/SecurityGroupUnrestrictedIngress22.py"}]} | 1,888 | 566 |
gh_patches_debug_32754 | rasdani/github-patches | git_diff | Mailu__Mailu-1349 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support for SRS
See https://en.wikipedia.org/wiki/Sender_Rewriting_Scheme and https://github.com/roehling/postsrsd
</issue>
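For orientation, the patch that resolved this (reproduced further below) builds the rewriting on the `srslib` package. A minimal sketch of the two directions of SRS rewriting follows; the secret, domain, and addresses are illustrative placeholders, not Mailu configuration:

```python
# Sketch of SRS rewriting with srslib; secret/domain/addresses are illustrative.
import srslib

srs = srslib.SRS("some-stable-secret")  # must not change between restarts

# Outbound: mail forwarded on behalf of a foreign sender gets its envelope
# sender rewritten to our domain, so bounces come back to us first.
rewritten = srs.forward("[email protected]", "mail.example.net")

# Inbound bounce: if the recipient is a valid SRS address, recover the
# original envelope sender so the bounce can travel the reverse path.
if srslib.SRS.is_srs_address(rewritten):
    try:
        original = srs.reverse(rewritten)
    except srslib.Error:
        original = None  # bad or tampered SRS signature
```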
<code>
[start of core/admin/mailu/internal/views/postfix.py]
1 from mailu import models
2 from mailu.internal import internal
3
4 import flask
5 import re
6
7
8 @internal.route("/postfix/domain/<domain_name>")
9 def postfix_mailbox_domain(domain_name):
10 if re.match("^\[.*\]$", domain_name):
11 return flask.abort(404)
12 domain = models.Domain.query.get(domain_name) or \
13 models.Alternative.query.get(domain_name) or \
14 flask.abort(404)
15 return flask.jsonify(domain.name)
16
17
18 @internal.route("/postfix/mailbox/<path:email>")
19 def postfix_mailbox_map(email):
20 user = models.User.query.get(email) or flask.abort(404)
21 return flask.jsonify(user.email)
22
23
24 @internal.route("/postfix/alias/<path:alias>")
25 def postfix_alias_map(alias):
26 localpart, domain_name = models.Email.resolve_domain(alias)
27 if localpart is None:
28 return flask.jsonify(domain_name)
29 destination = models.Email.resolve_destination(localpart, domain_name)
30 return flask.jsonify(",".join(destination)) if destination else flask.abort(404)
31
32
33 @internal.route("/postfix/transport/<path:email>")
34 def postfix_transport(email):
35 if email == '*' or re.match("(^|.*@)\[.*\]$", email):
36 return flask.abort(404)
37 localpart, domain_name = models.Email.resolve_domain(email)
38 relay = models.Relay.query.get(domain_name) or flask.abort(404)
39 return flask.jsonify("smtp:[{}]".format(relay.smtp))
40
41
42 @internal.route("/postfix/sender/login/<path:sender>")
43 def postfix_sender_login(sender):
44 localpart, domain_name = models.Email.resolve_domain(sender)
45 if localpart is None:
46 return flask.abort(404)
47 destination = models.Email.resolve_destination(localpart, domain_name, True)
48 return flask.jsonify(",".join(destination)) if destination else flask.abort(404)
49
50
51 @internal.route("/postfix/sender/access/<path:sender>")
52 def postfix_sender_access(sender):
53 """ Simply reject any sender that pretends to be from a local domain
54 """
55 if not is_void_address(sender):
56 localpart, domain_name = models.Email.resolve_domain(sender)
57 return flask.jsonify("REJECT") if models.Domain.query.get(domain_name) else flask.abort(404)
58 else:
59 return flask.abort(404)
60
61
62 def is_void_address(email):
63 '''True if the email is void (null) email address.
64 '''
65 if email.startswith('<') and email.endswith('>'):
66 email = email[1:-1]
67 # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; so let's
68 # consider void any such thing.
69 return '@' not in email
70
[end of core/admin/mailu/internal/views/postfix.py]
[start of core/postfix/start.py]
1 #!/usr/bin/python3
2
3 import os
4 import glob
5 import shutil
6 import multiprocessing
7 import logging as log
8 import sys
9
10 from podop import run_server
11 from socrate import system, conf
12
13 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
14
15 def start_podop():
16 os.setuid(100)
17 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
18 # TODO: Remove verbosity setting from Podop?
19 run_server(0, "postfix", "/tmp/podop.socket", [
20 ("transport", "url", url + "transport/§"),
21 ("alias", "url", url + "alias/§"),
22 ("domain", "url", url + "domain/§"),
23 ("mailbox", "url", url + "mailbox/§"),
24 ("senderaccess", "url", url + "sender/access/§"),
25 ("senderlogin", "url", url + "sender/login/§")
26 ])
27
28 # Actual startup script
29 os.environ["FRONT_ADDRESS"] = system.get_host_address_from_environment("FRONT", "front")
30 os.environ["ADMIN_ADDRESS"] = system.get_host_address_from_environment("ADMIN", "admin")
31 os.environ["ANTISPAM_MILTER_ADDRESS"] = system.get_host_address_from_environment("ANTISPAM_MILTER", "antispam:11332")
32 os.environ["LMTP_ADDRESS"] = system.get_host_address_from_environment("LMTP", "imap:2525")
33
34 for postfix_file in glob.glob("/conf/*.cf"):
35 conf.jinja(postfix_file, os.environ, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
36
37 if os.path.exists("/overrides/postfix.cf"):
38 for line in open("/overrides/postfix.cf").read().strip().split("\n"):
39 os.system('postconf -e "{}"'.format(line))
40
41 if os.path.exists("/overrides/postfix.master"):
42 for line in open("/overrides/postfix.master").read().strip().split("\n"):
43 os.system('postconf -Me "{}"'.format(line))
44
45 for map_file in glob.glob("/overrides/*.map"):
46 destination = os.path.join("/etc/postfix", os.path.basename(map_file))
47 shutil.copyfile(map_file, destination)
48 os.system("postmap {}".format(destination))
49 os.remove(destination)
50
51 if "RELAYUSER" in os.environ:
52 path = "/etc/postfix/sasl_passwd"
53 conf.jinja("/conf/sasl_passwd", os.environ, path)
54 os.system("postmap {}".format(path))
55
56 # Run Podop and Postfix
57 multiprocessing.Process(target=start_podop).start()
58 os.system("/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing")
59 os.system("postfix start-fg")
60
[end of core/postfix/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py
--- a/core/admin/mailu/internal/views/postfix.py
+++ b/core/admin/mailu/internal/views/postfix.py
@@ -3,6 +3,7 @@
import flask
import re
+import srslib
@internal.route("/postfix/domain/<domain_name>")
@@ -39,6 +40,38 @@
return flask.jsonify("smtp:[{}]".format(relay.smtp))
[email protected]("/postfix/recipient/map/<path:recipient>")
+def postfix_recipient_map(recipient):
+ """ Rewrite the envelope recipient if it is a valid SRS address.
+
+ This is meant for bounces to go back to the original sender.
+ """
+ srs = srslib.SRS(flask.current_app.config["SECRET_KEY"])
+ if srslib.SRS.is_srs_address(recipient):
+ try:
+ return flask.jsonify(srs.reverse(recipient))
+ except srslib.Error as error:
+ return flask.abort(404)
+ return flask.abort(404)
+
+
[email protected]("/postfix/sender/map/<path:sender>")
+def postfix_sender_map(sender):
+ """ Rewrite the envelope sender in case the mail was not emitted by us.
+
+ This is for bounces to come back the reverse path properly.
+ """
+ srs = srslib.SRS(flask.current_app.config["SECRET_KEY"])
+ domain = flask.current_app.config["DOMAIN"]
+ try:
+ localpart, domain_name = models.Email.resolve_domain(sender)
+ except Exception as error:
+ return flask.abort(404)
+ if models.Domain.query.get(domain_name):
+ return flask.abort(404)
+ return flask.jsonify(srs.forward(sender, domain))
+
+
@internal.route("/postfix/sender/login/<path:sender>")
def postfix_sender_login(sender):
localpart, domain_name = models.Email.resolve_domain(sender)
diff --git a/core/postfix/start.py b/core/postfix/start.py
--- a/core/postfix/start.py
+++ b/core/postfix/start.py
@@ -21,6 +21,8 @@
("alias", "url", url + "alias/§"),
("domain", "url", url + "domain/§"),
("mailbox", "url", url + "mailbox/§"),
+ ("recipientmap", "url", url + "recipient/map/§"),
+ ("sendermap", "url", url + "sender/map/§"),
("senderaccess", "url", url + "sender/access/§"),
("senderlogin", "url", url + "sender/login/§")
])
| {"golden_diff": "diff --git a/core/admin/mailu/internal/views/postfix.py b/core/admin/mailu/internal/views/postfix.py\n--- a/core/admin/mailu/internal/views/postfix.py\n+++ b/core/admin/mailu/internal/views/postfix.py\n@@ -3,6 +3,7 @@\n \n import flask\n import re\n+import srslib\n \n \n @internal.route(\"/postfix/domain/<domain_name>\")\n@@ -39,6 +40,38 @@\n return flask.jsonify(\"smtp:[{}]\".format(relay.smtp))\n \n \[email protected](\"/postfix/recipient/map/<path:recipient>\")\n+def postfix_recipient_map(recipient):\n+ \"\"\" Rewrite the envelope recipient if it is a valid SRS address.\n+\n+ This is meant for bounces to go back to the original sender.\n+ \"\"\"\n+ srs = srslib.SRS(flask.current_app.config[\"SECRET_KEY\"])\n+ if srslib.SRS.is_srs_address(recipient):\n+ try:\n+ return flask.jsonify(srs.reverse(recipient))\n+ except srslib.Error as error:\n+ return flask.abort(404)\n+ return flask.abort(404)\n+\n+\[email protected](\"/postfix/sender/map/<path:sender>\")\n+def postfix_sender_map(sender):\n+ \"\"\" Rewrite the envelope sender in case the mail was not emitted by us.\n+\n+ This is for bounces to come back the reverse path properly.\n+ \"\"\"\n+ srs = srslib.SRS(flask.current_app.config[\"SECRET_KEY\"])\n+ domain = flask.current_app.config[\"DOMAIN\"]\n+ try:\n+ localpart, domain_name = models.Email.resolve_domain(sender)\n+ except Exception as error:\n+ return flask.abort(404)\n+ if models.Domain.query.get(domain_name):\n+ return flask.abort(404)\n+ return flask.jsonify(srs.forward(sender, domain))\n+\n+\n @internal.route(\"/postfix/sender/login/<path:sender>\")\n def postfix_sender_login(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\ndiff --git a/core/postfix/start.py b/core/postfix/start.py\n--- a/core/postfix/start.py\n+++ b/core/postfix/start.py\n@@ -21,6 +21,8 @@\n \t\t(\"alias\", \"url\", url + \"alias/\u00a7\"),\n \t\t(\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n+ (\"recipientmap\", \"url\", url + \"recipient/map/\u00a7\"),\n+ (\"sendermap\", \"url\", url + \"sender/map/\u00a7\"),\n (\"senderaccess\", \"url\", url + \"sender/access/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\")\n ])\n", "issue": "Support for SRS\nSee https://en.wikipedia.org/wiki/Sender_Rewriting_Scheme and https://github.com/roehling/postsrsd\n", "before_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\n\nimport flask\nimport re\n\n\[email protected](\"/postfix/domain/<domain_name>\")\ndef postfix_mailbox_domain(domain_name):\n if re.match(\"^\\[.*\\]$\", domain_name):\n return flask.abort(404)\n domain = models.Domain.query.get(domain_name) or \\\n models.Alternative.query.get(domain_name) or \\\n flask.abort(404)\n return flask.jsonify(domain.name)\n\n\[email protected](\"/postfix/mailbox/<path:email>\")\ndef postfix_mailbox_map(email):\n user = models.User.query.get(email) or flask.abort(404)\n return flask.jsonify(user.email)\n\n\[email protected](\"/postfix/alias/<path:alias>\")\ndef postfix_alias_map(alias):\n localpart, domain_name = models.Email.resolve_domain(alias)\n if localpart is None:\n return flask.jsonify(domain_name)\n destination = models.Email.resolve_destination(localpart, domain_name)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/transport/<path:email>\")\ndef postfix_transport(email):\n if email == '*' or re.match(\"(^|.*@)\\[.*\\]$\", email):\n return 
flask.abort(404)\n localpart, domain_name = models.Email.resolve_domain(email)\n relay = models.Relay.query.get(domain_name) or flask.abort(404)\n return flask.jsonify(\"smtp:[{}]\".format(relay.smtp))\n\n\[email protected](\"/postfix/sender/login/<path:sender>\")\ndef postfix_sender_login(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n if localpart is None:\n return flask.abort(404)\n destination = models.Email.resolve_destination(localpart, domain_name, True)\n return flask.jsonify(\",\".join(destination)) if destination else flask.abort(404)\n\n\[email protected](\"/postfix/sender/access/<path:sender>\")\ndef postfix_sender_access(sender):\n \"\"\" Simply reject any sender that pretends to be from a local domain\n \"\"\"\n if not is_void_address(sender):\n localpart, domain_name = models.Email.resolve_domain(sender)\n return flask.jsonify(\"REJECT\") if models.Domain.query.get(domain_name) else flask.abort(404)\n else:\n return flask.abort(404)\n\n\ndef is_void_address(email):\n '''True if the email is void (null) email address.\n '''\n if email.startswith('<') and email.endswith('>'):\n email = email[1:-1]\n # Some MTAs use things like '<MAILER-DAEMON>' instead of '<>'; so let's\n # consider void any such thing.\n return '@' not in email\n", "path": "core/admin/mailu/internal/views/postfix.py"}, {"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(100)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n\t\t(\"transport\", \"url\", url + \"transport/\u00a7\"),\n\t\t(\"alias\", \"url\", url + \"alias/\u00a7\"),\n\t\t(\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"senderaccess\", \"url\", url + \"sender/access/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\")\n ])\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = system.get_host_address_from_environment(\"FRONT\", \"front\")\nos.environ[\"ADMIN_ADDRESS\"] = system.get_host_address_from_environment(\"ADMIN\", \"admin\")\nos.environ[\"ANTISPAM_MILTER_ADDRESS\"] = system.get_host_address_from_environment(\"ANTISPAM_MILTER\", \"antispam:11332\")\nos.environ[\"LMTP_ADDRESS\"] = system.get_host_address_from_environment(\"LMTP\", \"imap:2525\")\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n conf.jinja(postfix_file, os.environ, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nif \"RELAYUSER\" in os.environ:\n path = \"/etc/postfix/sasl_passwd\"\n conf.jinja(\"/conf/sasl_passwd\", os.environ, path)\n os.system(\"postmap 
{}\".format(path))\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"/usr/libexec/postfix/post-install meta_directory=/etc/postfix create-missing\")\nos.system(\"postfix start-fg\")\n", "path": "core/postfix/start.py"}]} | 2,032 | 602 |
gh_patches_debug_32359 | rasdani/github-patches | git_diff | saulpw__visidata-2257 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fixed width saver and loader don't round trip. (Columns expand with an increasing number of spaces.)
**Small description**
Open `test.csv`:
``` csv
colours,counts
red,3
green,5
blue,8
```
Then save it as `test.fixed`:
```
colours counts
red 3
green 5
blue 8
```
Each column is separated by three spaces.
Now, open the newly saved `test.fixed`, and save it as `test.fixed.csv`.
Upon inspection, you will see that the three spaces have been included as column data, rather than being discarded as a separator:
```
colours,counts
red ,3
green ,5
blue ,8
```
If you repeat this process, three more spaces are appended to each column on every round trip.
**Expected result**
I expect to be able to round trip from CSV to Fixed and back without extra spaces being added to the data.
**Steps to reproduce with sample data and a .vd**
[test-fixed-saver.zip](https://github.com/saulpw/visidata/files/13938788/test-fixed-saver.zip)
**Additional context**
- saul.pw/VisiData v3.0.1
- Python 3.10.12
</issue>
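The culprit is `columnize()` in the loader below: it extends each column span all the way to the start of the *next* field, so the pad spaces written by the saver land inside the left-hand cell. A small standalone repro (the function body is copied from the loader shown below, with `sorted()` added only to make the set iteration order explicit):

```python
def columnize(rows):
    # All character positions that are ever non-space
    allNonspaces = set()
    for r in rows:
        for i, ch in enumerate(r):
            if not ch.isspace():
                allNonspaces.add(i)
    colstart = 0
    prev = 0
    for i in sorted(allNonspaces):
        if i > prev + 1:
            yield colstart, i   # bug: span runs to the next field's start,
            colstart = i        # keeping the separator spaces in the cell
        prev = i
    yield colstart, prev + 1    # final column gets the rest of the line

rows = ["colours   counts", "red       3", "green     5", "blue      8"]
i, j = next(columnize(rows))
print(repr(rows[1][i:j]))   # 'red       ' -- padding included as data
```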
<code>
[start of visidata/loaders/fixed_width.py]
1
2 from visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet
3
4
5 vd.option('fixed_rows', 1000, 'number of rows to check for fixed width columns')
6 vd.option('fixed_maxcols', 0, 'max number of fixed-width columns to create (0 is no max)')
7
8 @VisiData.api
9 def open_fixed(vd, p):
10 return FixedWidthColumnsSheet(p.base_stem, source=p, headerlines=[])
11
12 class FixedWidthColumn(Column):
13 def __init__(self, name, i, j, **kwargs):
14 super().__init__(name, **kwargs)
15 self.i, self.j = i, j
16
17 def calcValue(self, row):
18 return row[0][self.i:self.j]
19
20 def putValue(self, row, value):
21 value = str(value)[:self.j-self.i]
22 j = self.j or len(row)
23 row[0] = row[0][:self.i] + '%-*s' % (j-self.i, value) + row[0][self.j:]
24
25 def columnize(rows):
26 'Generate (i,j) indexes for fixed-width columns found in rows'
27
28 ## find all character columns that are not spaces ever
29 allNonspaces = set()
30 for r in rows:
31 for i, ch in enumerate(r):
32 if not ch.isspace():
33 allNonspaces.add(i)
34
35 colstart = 0
36 prev = 0
37
38 # collapse fields
39 for i in allNonspaces:
40 if i > prev+1:
41 yield colstart, i
42 colstart = i
43 prev = i
44
45 yield colstart, prev+1 # final column gets rest of line
46
47
48 class FixedWidthColumnsSheet(SequenceSheet):
49 rowtype = 'lines' # rowdef: [line] (wrapping in list makes it unique and modifiable)
50 def addRow(self, row, index=None):
51 Sheet.addRow(self, row, index=index)
52
53 def iterload(self):
54 itsource = iter(self.source)
55
56 # compute fixed width columns from first fixed_rows lines
57 maxcols = self.options.fixed_maxcols
58 self.columns = []
59 fixedRows = list([x] for x in self.optlines(itsource, 'fixed_rows'))
60 for i, j in columnize(list(r[0] for r in fixedRows)):
61 if maxcols and self.nCols >= maxcols-1:
62 self.addColumn(FixedWidthColumn('', i, None))
63 break
64 else:
65 self.addColumn(FixedWidthColumn('', i, j))
66
67 yield from fixedRows
68
69 self.setColNames(self.headerlines)
70
71 yield from ([line] for line in itsource)
72
73 def setCols(self, headerlines):
74 self.headerlines = headerlines
75
76
77 @VisiData.api
78 def save_fixed(vd, p, *vsheets):
79 with p.open(mode='w', encoding=vsheets[0].options.save_encoding) as fp:
80 for sheet in vsheets:
81 if len(vsheets) > 1:
82 fp.write('%s\n\n' % sheet.name)
83
84 widths = {} # Column -> width:int
85 # headers
86 for col in Progress(sheet.visibleCols, gerund='sizing'):
87 widths[col] = col.getMaxWidth(sheet.rows) #1849
88 fp.write(('{0:%s} ' % widths[col]).format(col.name))
89 fp.write('\n')
90
91 # rows
92 with Progress(gerund='saving'):
93 for dispvals in sheet.iterdispvals(format=True):
94 for col, val in dispvals.items():
95 fp.write(('{0:%s%s.%s} ' % ('>' if vd.isNumeric(col) else '<', widths[col], widths[col])).format(val))
96 fp.write('\n')
97
[end of visidata/loaders/fixed_width.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/visidata/loaders/fixed_width.py b/visidata/loaders/fixed_width.py
--- a/visidata/loaders/fixed_width.py
+++ b/visidata/loaders/fixed_width.py
@@ -1,5 +1,5 @@
-from visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet
+from visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet, Column, dispwidth
vd.option('fixed_rows', 1000, 'number of rows to check for fixed width columns')
@@ -9,6 +9,22 @@
def open_fixed(vd, p):
return FixedWidthColumnsSheet(p.base_stem, source=p, headerlines=[])
[email protected]
+def getMaxDataWidth(col, rows): #2255 need real max width for fixed width saver
+ '''Return the maximum length of any cell in column or its header,
+ even if wider than window. (Slow for large cells!)'''
+
+ w = 0
+ nlen = dispwidth(col.name)
+ if len(rows) > 0:
+ w_max = 0
+ for r in rows:
+ row_w = dispwidth(col.getDisplayValue(r))
+ if w_max < row_w:
+ w_max = row_w
+ w = w_max
+ return max(w, nlen)
+
class FixedWidthColumn(Column):
def __init__(self, name, i, j, **kwargs):
super().__init__(name, **kwargs)
@@ -38,7 +54,7 @@
# collapse fields
for i in allNonspaces:
if i > prev+1:
- yield colstart, i
+ yield colstart, prev+1 #2255
colstart = i
prev = i
@@ -84,7 +100,7 @@
widths = {} # Column -> width:int
# headers
for col in Progress(sheet.visibleCols, gerund='sizing'):
- widths[col] = col.getMaxWidth(sheet.rows) #1849
+ widths[col] = col.getMaxDataWidth(sheet.rows) #1849 #2255
fp.write(('{0:%s} ' % widths[col]).format(col.name))
fp.write('\n')
| {"golden_diff": "diff --git a/visidata/loaders/fixed_width.py b/visidata/loaders/fixed_width.py\n--- a/visidata/loaders/fixed_width.py\n+++ b/visidata/loaders/fixed_width.py\n@@ -1,5 +1,5 @@\n \n-from visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet\n+from visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet, Column, dispwidth\n \n \n vd.option('fixed_rows', 1000, 'number of rows to check for fixed width columns')\n@@ -9,6 +9,22 @@\n def open_fixed(vd, p):\n return FixedWidthColumnsSheet(p.base_stem, source=p, headerlines=[])\n \[email protected]\n+def getMaxDataWidth(col, rows): #2255 need real max width for fixed width saver\n+ '''Return the maximum length of any cell in column or its header,\n+ even if wider than window. (Slow for large cells!)'''\n+\n+ w = 0\n+ nlen = dispwidth(col.name)\n+ if len(rows) > 0:\n+ w_max = 0\n+ for r in rows:\n+ row_w = dispwidth(col.getDisplayValue(r))\n+ if w_max < row_w:\n+ w_max = row_w\n+ w = w_max\n+ return max(w, nlen)\n+\n class FixedWidthColumn(Column):\n def __init__(self, name, i, j, **kwargs):\n super().__init__(name, **kwargs)\n@@ -38,7 +54,7 @@\n # collapse fields\n for i in allNonspaces:\n if i > prev+1:\n- yield colstart, i\n+ yield colstart, prev+1 #2255\n colstart = i\n prev = i\n \n@@ -84,7 +100,7 @@\n widths = {} # Column -> width:int\n # headers\n for col in Progress(sheet.visibleCols, gerund='sizing'):\n- widths[col] = col.getMaxWidth(sheet.rows) #1849\n+ widths[col] = col.getMaxDataWidth(sheet.rows) #1849 #2255\n fp.write(('{0:%s} ' % widths[col]).format(col.name))\n fp.write('\\n')\n", "issue": "Fixed width saver and loader don't round trip. (Columns expand with increasing number of spaces)\n**Small description**\r\n\r\nOpen `test.csv`:\r\n\r\n``` csv\r\ncolours,counts\r\nred,3\r\ngreen,5\r\nblue,8\r\n```\r\n\r\nThen save it as `test.fixed`:\r\n\r\n```\r\ncolours counts \r\nred 3 \r\ngreen 5 \r\nblue 8 \r\n```\r\n\r\nEach column is separated by three spaces.\r\n\r\nNow, open the newly saved `test.fixed`, and save it as `test.fixed.csv`.\r\nUpon inspection, you will see that the three spaces have been included as column data, rather than being discarded as a separator:\r\n\r\n```\r\ncolours,counts\r\nred ,3 \r\ngreen ,5 \r\nblue ,8 \r\n```\r\n\r\nIf you repeat this process, three spaces get appended to each column every time you repeat a round trip.\r\n\r\n**Expected result**\r\n\r\nI expect to be able to round trip from CSV to Fixed and back without extra spaces being added to the data.\r\n\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\n\r\n[test-fixed-saver.zip](https://github.com/saulpw/visidata/files/13938788/test-fixed-saver.zip)\r\n\r\n**Additional context**\r\n\r\n- saul.pw/VisiData v3.0.1\r\n- Python 3.10.12\r\n\n", "before_files": [{"content": "\nfrom visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet\n\n\nvd.option('fixed_rows', 1000, 'number of rows to check for fixed width columns')\nvd.option('fixed_maxcols', 0, 'max number of fixed-width columns to create (0 is no max)')\n\[email protected]\ndef open_fixed(vd, p):\n return FixedWidthColumnsSheet(p.base_stem, source=p, headerlines=[])\n\nclass FixedWidthColumn(Column):\n def __init__(self, name, i, j, **kwargs):\n super().__init__(name, **kwargs)\n self.i, self.j = i, j\n\n def calcValue(self, row):\n return row[0][self.i:self.j]\n\n def putValue(self, row, value):\n value = str(value)[:self.j-self.i]\n j = self.j or len(row)\n row[0] = row[0][:self.i] + '%-*s' % (j-self.i, value) + 
row[0][self.j:]\n\ndef columnize(rows):\n 'Generate (i,j) indexes for fixed-width columns found in rows'\n\n ## find all character columns that are not spaces ever\n allNonspaces = set()\n for r in rows:\n for i, ch in enumerate(r):\n if not ch.isspace():\n allNonspaces.add(i)\n\n colstart = 0\n prev = 0\n\n # collapse fields\n for i in allNonspaces:\n if i > prev+1:\n yield colstart, i\n colstart = i\n prev = i\n\n yield colstart, prev+1 # final column gets rest of line\n\n\nclass FixedWidthColumnsSheet(SequenceSheet):\n rowtype = 'lines' # rowdef: [line] (wrapping in list makes it unique and modifiable)\n def addRow(self, row, index=None):\n Sheet.addRow(self, row, index=index)\n\n def iterload(self):\n itsource = iter(self.source)\n\n # compute fixed width columns from first fixed_rows lines\n maxcols = self.options.fixed_maxcols\n self.columns = []\n fixedRows = list([x] for x in self.optlines(itsource, 'fixed_rows'))\n for i, j in columnize(list(r[0] for r in fixedRows)):\n if maxcols and self.nCols >= maxcols-1:\n self.addColumn(FixedWidthColumn('', i, None))\n break\n else:\n self.addColumn(FixedWidthColumn('', i, j))\n\n yield from fixedRows\n\n self.setColNames(self.headerlines)\n\n yield from ([line] for line in itsource)\n\n def setCols(self, headerlines):\n self.headerlines = headerlines\n\n\[email protected]\ndef save_fixed(vd, p, *vsheets):\n with p.open(mode='w', encoding=vsheets[0].options.save_encoding) as fp:\n for sheet in vsheets:\n if len(vsheets) > 1:\n fp.write('%s\\n\\n' % sheet.name)\n\n widths = {} # Column -> width:int\n # headers\n for col in Progress(sheet.visibleCols, gerund='sizing'):\n widths[col] = col.getMaxWidth(sheet.rows) #1849\n fp.write(('{0:%s} ' % widths[col]).format(col.name))\n fp.write('\\n')\n\n # rows\n with Progress(gerund='saving'):\n for dispvals in sheet.iterdispvals(format=True):\n for col, val in dispvals.items():\n fp.write(('{0:%s%s.%s} ' % ('>' if vd.isNumeric(col) else '<', widths[col], widths[col])).format(val))\n fp.write('\\n')\n", "path": "visidata/loaders/fixed_width.py"}]} | 1,847 | 526 |
gh_patches_debug_38641 | rasdani/github-patches | git_diff | abey79__vpype-507 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Remove deprecated APIs
</issue>
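Each shim below warns and forwards to its `vpype_cli` counterpart, so the warning messages already name the replacement. For downstream code the migration is a one-line import swap, for example:

```python
# Before (deprecated; logs a warning when used)
import vpype
length = vpype.LengthType()

# After (the replacement named in the deprecation message)
import vpype_cli
length = vpype_cli.LengthType()
```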
<code>
[start of vpype/_deprecated.py]
1 from __future__ import annotations
2
3 import logging
4
5 from .config import config_manager
6
7 # deprecated
8 CONFIG_MANAGER = config_manager
9
10
11 def block_processor(*args, **kwargs): # pragma: no cover
12 import vpype_cli
13
14 logging.warning(
15 "!!! `@vpype.block_processor` is deprecated, "
16 "use `@vpype_cli.block_processor` instead."
17 )
18 return vpype_cli.block_processor(*args, **kwargs)
19
20
21 def generator(*args, **kwargs): # pragma: no cover
22 import vpype_cli
23
24 logging.warning(
25 "!!! `@vpype.generator` is deprecated, use `@vpype_cli.generator` instead."
26 )
27 return vpype_cli.generator(*args, **kwargs)
28
29
30 def global_processor(*args, **kwargs): # pragma: no cover
31 import vpype_cli
32
33 logging.warning(
34 "!!! `@vpype.global_processor` is deprecated, "
35 "use `@vpype_cli.global_processor` instead."
36 )
37 return vpype_cli.global_processor(*args, **kwargs)
38
39
40 def layer_processor(*args, **kwargs): # pragma: no cover
41 import vpype_cli
42
43 logging.warning(
44 "!!! `@vpype.layer_processor` is deprecated, use `@vpype_cli.layer_processor` instead."
45 )
46 return vpype_cli.layer_processor(*args, **kwargs)
47
48
49 def pass_state(*args, **kwargs): # pragma: no cover
50 import vpype_cli
51
52 logging.warning(
53 "!!! `@vpype.pass_state` is deprecated, use `@vpype_cli.pass_state` instead."
54 )
55 return vpype_cli.pass_state(*args, **kwargs)
56
57
58 class AngleType: # pragma: no cover
59 def __new__(cls):
60 import vpype_cli
61
62 logging.warning(
63 "!!! `vpype.AngleType` is deprecated, use `vpype_cli.AngleType` instead."
64 )
65 return vpype_cli.AngleType()
66
67
68 class LayerType: # pragma: no cover
69 def __new__(cls, *args, **kwargs):
70 import vpype_cli
71
72 logging.warning(
73 "!!! `vpype.LayerType` is deprecated, use `vpype_cli.LayerType` instead."
74 )
75 return vpype_cli.LayerType(*args, **kwargs)
76
77
78 class LengthType: # pragma: no cover
79 def __new__(cls):
80 import vpype_cli
81
82 logging.warning(
83 "!!! `vpype.LengthType` is deprecated, use `vpype_cli.LengthType` instead."
84 )
85 return vpype_cli.LengthType()
86
87
88 class PageSizeType: # pragma: no cover
89 def __new__(cls):
90 import vpype_cli
91
92 logging.warning(
93 "!!! `vpype.PageSizeType` is deprecated, use `vpype_cli.PageSizeType` instead."
94 )
95 return vpype_cli.PageSizeType()
96
97
98 def multiple_to_layer_ids(*args, **kwargs): # pragma: no cover
99 import vpype_cli
100
101 logging.warning(
102 "!!! `vpype.multiple_to_layer_ids` is deprecated, "
103 "use `vpype_cli.multiple_to_layer_ids` instead."
104 )
105 return vpype_cli.multiple_to_layer_ids(*args, **kwargs)
106
107
108 def single_to_layer_id(*args, **kwargs): # pragma: no cover
109 import vpype_cli
110
111 logging.warning(
112 "!!! `vpype.single_to_layer_id` is deprecated, "
113 "use `vpype_cli.single_to_layer_id` instead."
114 )
115 return vpype_cli.single_to_layer_id(*args, **kwargs)
116
[end of vpype/_deprecated.py]
[start of vpype/__init__.py]
1 """This module contains vpype core and its API."""
2
3 from ._deprecated import *
4 from .config import *
5 from .filters import *
6 from .geometry import *
7 from .io import *
8 from .line_index import *
9 from .metadata import *
10 from .model import *
11 from .primitives import *
12 from .text import *
13 from .utils import *
14
15
16 def _get_version() -> str:
17 from importlib.metadata import version
18
19 return version(__name__)
20
21
22 __version__ = _get_version()
23
[end of vpype/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vpype/__init__.py b/vpype/__init__.py
--- a/vpype/__init__.py
+++ b/vpype/__init__.py
@@ -1,6 +1,5 @@
"""This module contains vpype core and its API."""
-from ._deprecated import *
from .config import *
from .filters import *
from .geometry import *
diff --git a/vpype/_deprecated.py b/vpype/_deprecated.py
deleted file mode 100644
--- a/vpype/_deprecated.py
+++ /dev/null
@@ -1,115 +0,0 @@
-from __future__ import annotations
-
-import logging
-
-from .config import config_manager
-
-# deprecated
-CONFIG_MANAGER = config_manager
-
-
-def block_processor(*args, **kwargs): # pragma: no cover
- import vpype_cli
-
- logging.warning(
- "!!! `@vpype.block_processor` is deprecated, "
- "use `@vpype_cli.block_processor` instead."
- )
- return vpype_cli.block_processor(*args, **kwargs)
-
-
-def generator(*args, **kwargs): # pragma: no cover
- import vpype_cli
-
- logging.warning(
- "!!! `@vpype.generator` is deprecated, use `@vpype_cli.generator` instead."
- )
- return vpype_cli.generator(*args, **kwargs)
-
-
-def global_processor(*args, **kwargs): # pragma: no cover
- import vpype_cli
-
- logging.warning(
- "!!! `@vpype.global_processor` is deprecated, "
- "use `@vpype_cli.global_processor` instead."
- )
- return vpype_cli.global_processor(*args, **kwargs)
-
-
-def layer_processor(*args, **kwargs): # pragma: no cover
- import vpype_cli
-
- logging.warning(
- "!!! `@vpype.layer_processor` is deprecated, use `@vpype_cli.layer_processor` instead."
- )
- return vpype_cli.layer_processor(*args, **kwargs)
-
-
-def pass_state(*args, **kwargs): # pragma: no cover
- import vpype_cli
-
- logging.warning(
- "!!! `@vpype.pass_state` is deprecated, use `@vpype_cli.pass_state` instead."
- )
- return vpype_cli.pass_state(*args, **kwargs)
-
-
-class AngleType: # pragma: no cover
- def __new__(cls):
- import vpype_cli
-
- logging.warning(
- "!!! `vpype.AngleType` is deprecated, use `vpype_cli.AngleType` instead."
- )
- return vpype_cli.AngleType()
-
-
-class LayerType: # pragma: no cover
- def __new__(cls, *args, **kwargs):
- import vpype_cli
-
- logging.warning(
- "!!! `vpype.LayerType` is deprecated, use `vpype_cli.LayerType` instead."
- )
- return vpype_cli.LayerType(*args, **kwargs)
-
-
-class LengthType: # pragma: no cover
- def __new__(cls):
- import vpype_cli
-
- logging.warning(
- "!!! `vpype.LengthType` is deprecated, use `vpype_cli.LengthType` instead."
- )
- return vpype_cli.LengthType()
-
-
-class PageSizeType: # pragma: no cover
- def __new__(cls):
- import vpype_cli
-
- logging.warning(
- "!!! `vpype.PageSizeType` is deprecated, use `vpype_cli.PageSizeType` instead."
- )
- return vpype_cli.PageSizeType()
-
-
-def multiple_to_layer_ids(*args, **kwargs): # pragma: no cover
- import vpype_cli
-
- logging.warning(
- "!!! `vpype.multiple_to_layer_ids` is deprecated, "
- "use `vpype_cli.multiple_to_layer_ids` instead."
- )
- return vpype_cli.multiple_to_layer_ids(*args, **kwargs)
-
-
-def single_to_layer_id(*args, **kwargs): # pragma: no cover
- import vpype_cli
-
- logging.warning(
- "!!! `vpype.single_to_layer_id` is deprecated, "
- "use `vpype_cli.single_to_layer_id` instead."
- )
- return vpype_cli.single_to_layer_id(*args, **kwargs)
| {"golden_diff": "diff --git a/vpype/__init__.py b/vpype/__init__.py\n--- a/vpype/__init__.py\n+++ b/vpype/__init__.py\n@@ -1,6 +1,5 @@\n \"\"\"This module contains vpype core and its API.\"\"\"\n \n-from ._deprecated import *\n from .config import *\n from .filters import *\n from .geometry import *\ndiff --git a/vpype/_deprecated.py b/vpype/_deprecated.py\ndeleted file mode 100644\n--- a/vpype/_deprecated.py\n+++ /dev/null\n@@ -1,115 +0,0 @@\n-from __future__ import annotations\n-\n-import logging\n-\n-from .config import config_manager\n-\n-# deprecated\n-CONFIG_MANAGER = config_manager\n-\n-\n-def block_processor(*args, **kwargs): # pragma: no cover\n- import vpype_cli\n-\n- logging.warning(\n- \"!!! `@vpype.block_processor` is deprecated, \"\n- \"use `@vpype_cli.block_processor` instead.\"\n- )\n- return vpype_cli.block_processor(*args, **kwargs)\n-\n-\n-def generator(*args, **kwargs): # pragma: no cover\n- import vpype_cli\n-\n- logging.warning(\n- \"!!! `@vpype.generator` is deprecated, use `@vpype_cli.generator` instead.\"\n- )\n- return vpype_cli.generator(*args, **kwargs)\n-\n-\n-def global_processor(*args, **kwargs): # pragma: no cover\n- import vpype_cli\n-\n- logging.warning(\n- \"!!! `@vpype.global_processor` is deprecated, \"\n- \"use `@vpype_cli.global_processor` instead.\"\n- )\n- return vpype_cli.global_processor(*args, **kwargs)\n-\n-\n-def layer_processor(*args, **kwargs): # pragma: no cover\n- import vpype_cli\n-\n- logging.warning(\n- \"!!! `@vpype.layer_processor` is deprecated, use `@vpype_cli.layer_processor` instead.\"\n- )\n- return vpype_cli.layer_processor(*args, **kwargs)\n-\n-\n-def pass_state(*args, **kwargs): # pragma: no cover\n- import vpype_cli\n-\n- logging.warning(\n- \"!!! `@vpype.pass_state` is deprecated, use `@vpype_cli.pass_state` instead.\"\n- )\n- return vpype_cli.pass_state(*args, **kwargs)\n-\n-\n-class AngleType: # pragma: no cover\n- def __new__(cls):\n- import vpype_cli\n-\n- logging.warning(\n- \"!!! `vpype.AngleType` is deprecated, use `vpype_cli.AngleType` instead.\"\n- )\n- return vpype_cli.AngleType()\n-\n-\n-class LayerType: # pragma: no cover\n- def __new__(cls, *args, **kwargs):\n- import vpype_cli\n-\n- logging.warning(\n- \"!!! `vpype.LayerType` is deprecated, use `vpype_cli.LayerType` instead.\"\n- )\n- return vpype_cli.LayerType(*args, **kwargs)\n-\n-\n-class LengthType: # pragma: no cover\n- def __new__(cls):\n- import vpype_cli\n-\n- logging.warning(\n- \"!!! `vpype.LengthType` is deprecated, use `vpype_cli.LengthType` instead.\"\n- )\n- return vpype_cli.LengthType()\n-\n-\n-class PageSizeType: # pragma: no cover\n- def __new__(cls):\n- import vpype_cli\n-\n- logging.warning(\n- \"!!! `vpype.PageSizeType` is deprecated, use `vpype_cli.PageSizeType` instead.\"\n- )\n- return vpype_cli.PageSizeType()\n-\n-\n-def multiple_to_layer_ids(*args, **kwargs): # pragma: no cover\n- import vpype_cli\n-\n- logging.warning(\n- \"!!! `vpype.multiple_to_layer_ids` is deprecated, \"\n- \"use `vpype_cli.multiple_to_layer_ids` instead.\"\n- )\n- return vpype_cli.multiple_to_layer_ids(*args, **kwargs)\n-\n-\n-def single_to_layer_id(*args, **kwargs): # pragma: no cover\n- import vpype_cli\n-\n- logging.warning(\n- \"!!! 
`vpype.single_to_layer_id` is deprecated, \"\n- \"use `vpype_cli.single_to_layer_id` instead.\"\n- )\n- return vpype_cli.single_to_layer_id(*args, **kwargs)\n", "issue": "Remove deprecated APIs\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\n\nfrom .config import config_manager\n\n# deprecated\nCONFIG_MANAGER = config_manager\n\n\ndef block_processor(*args, **kwargs): # pragma: no cover\n import vpype_cli\n\n logging.warning(\n \"!!! `@vpype.block_processor` is deprecated, \"\n \"use `@vpype_cli.block_processor` instead.\"\n )\n return vpype_cli.block_processor(*args, **kwargs)\n\n\ndef generator(*args, **kwargs): # pragma: no cover\n import vpype_cli\n\n logging.warning(\n \"!!! `@vpype.generator` is deprecated, use `@vpype_cli.generator` instead.\"\n )\n return vpype_cli.generator(*args, **kwargs)\n\n\ndef global_processor(*args, **kwargs): # pragma: no cover\n import vpype_cli\n\n logging.warning(\n \"!!! `@vpype.global_processor` is deprecated, \"\n \"use `@vpype_cli.global_processor` instead.\"\n )\n return vpype_cli.global_processor(*args, **kwargs)\n\n\ndef layer_processor(*args, **kwargs): # pragma: no cover\n import vpype_cli\n\n logging.warning(\n \"!!! `@vpype.layer_processor` is deprecated, use `@vpype_cli.layer_processor` instead.\"\n )\n return vpype_cli.layer_processor(*args, **kwargs)\n\n\ndef pass_state(*args, **kwargs): # pragma: no cover\n import vpype_cli\n\n logging.warning(\n \"!!! `@vpype.pass_state` is deprecated, use `@vpype_cli.pass_state` instead.\"\n )\n return vpype_cli.pass_state(*args, **kwargs)\n\n\nclass AngleType: # pragma: no cover\n def __new__(cls):\n import vpype_cli\n\n logging.warning(\n \"!!! `vpype.AngleType` is deprecated, use `vpype_cli.AngleType` instead.\"\n )\n return vpype_cli.AngleType()\n\n\nclass LayerType: # pragma: no cover\n def __new__(cls, *args, **kwargs):\n import vpype_cli\n\n logging.warning(\n \"!!! `vpype.LayerType` is deprecated, use `vpype_cli.LayerType` instead.\"\n )\n return vpype_cli.LayerType(*args, **kwargs)\n\n\nclass LengthType: # pragma: no cover\n def __new__(cls):\n import vpype_cli\n\n logging.warning(\n \"!!! `vpype.LengthType` is deprecated, use `vpype_cli.LengthType` instead.\"\n )\n return vpype_cli.LengthType()\n\n\nclass PageSizeType: # pragma: no cover\n def __new__(cls):\n import vpype_cli\n\n logging.warning(\n \"!!! `vpype.PageSizeType` is deprecated, use `vpype_cli.PageSizeType` instead.\"\n )\n return vpype_cli.PageSizeType()\n\n\ndef multiple_to_layer_ids(*args, **kwargs): # pragma: no cover\n import vpype_cli\n\n logging.warning(\n \"!!! `vpype.multiple_to_layer_ids` is deprecated, \"\n \"use `vpype_cli.multiple_to_layer_ids` instead.\"\n )\n return vpype_cli.multiple_to_layer_ids(*args, **kwargs)\n\n\ndef single_to_layer_id(*args, **kwargs): # pragma: no cover\n import vpype_cli\n\n logging.warning(\n \"!!! 
`vpype.single_to_layer_id` is deprecated, \"\n \"use `vpype_cli.single_to_layer_id` instead.\"\n )\n return vpype_cli.single_to_layer_id(*args, **kwargs)\n", "path": "vpype/_deprecated.py"}, {"content": "\"\"\"This module contains vpype core and its API.\"\"\"\n\nfrom ._deprecated import *\nfrom .config import *\nfrom .filters import *\nfrom .geometry import *\nfrom .io import *\nfrom .line_index import *\nfrom .metadata import *\nfrom .model import *\nfrom .primitives import *\nfrom .text import *\nfrom .utils import *\n\n\ndef _get_version() -> str:\n from importlib.metadata import version\n\n return version(__name__)\n\n\n__version__ = _get_version()\n", "path": "vpype/__init__.py"}]} | 1,731 | 994 |
gh_patches_debug_33327 | rasdani/github-patches | git_diff | comfyanonymous__ComfyUI-2207 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
HyperTile node is nondeterministic across executions and messes with global randomness
The HyperTile node uses the random module and seeds the global random with its own counter variable.
Unfortunately, this counter variable is retained across executions if the HyperTile parameters don't change, and so every execution will have different results.
The effect on global random can be avoided just by using a `random.Random()` instance instead of a counter, but since ComfyUI doesn't provide any kind of after-exec function for nodes, there doesn't seem to be a way to reset it to its initial state after one prompt is executed.
I suppose you could work around this by having IS_CHANGED return something so that the node always gets executed, thus reinitializing the randomness, but that might cause any nodes that come after the HyperTile node to needlessly re-execute.
</issue>
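As a concrete illustration of the first suggestion above, the seeding could be isolated in a private `random.Random()` instance instead of touching module-level state. This is a sketch, not the merged fix; the patch that actually landed (below) draws from torch's RNG instead, per its "consistency across generations" comment:

```python
import random

# Hypothetical variant of random_divisor() that keeps its own RNG,
# leaving users of random.seed()/random.random() unaffected.
_rng = random.Random(0)  # still needs re-seeding per prompt to stay deterministic

def random_divisor(value: int, min_value: int, max_options: int = 1) -> int:
    min_value = min(min_value, value)
    divisors = [i for i in range(min_value, value + 1) if value % i == 0]
    ns = [value // i for i in divisors[:max_options]]  # has at least 1 element
    return ns[_rng.randint(0, len(ns) - 1)]
```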
<code>
[start of comfy_extras/nodes_hypertile.py]
1 #Taken from: https://github.com/tfernd/HyperTile/
2
3 import math
4 from einops import rearrange
5 import random
6
7 def random_divisor(value: int, min_value: int, /, max_options: int = 1, counter = 0) -> int:
8 min_value = min(min_value, value)
9
10 # All big divisors of value (inclusive)
11 divisors = [i for i in range(min_value, value + 1) if value % i == 0]
12
13 ns = [value // i for i in divisors[:max_options]] # has at least 1 element
14
15 random.seed(counter)
16 idx = random.randint(0, len(ns) - 1)
17
18 return ns[idx]
19
20 class HyperTile:
21 @classmethod
22 def INPUT_TYPES(s):
23 return {"required": { "model": ("MODEL",),
24 "tile_size": ("INT", {"default": 256, "min": 1, "max": 2048}),
25 "swap_size": ("INT", {"default": 2, "min": 1, "max": 128}),
26 "max_depth": ("INT", {"default": 0, "min": 0, "max": 10}),
27 "scale_depth": ("BOOLEAN", {"default": False}),
28 }}
29 RETURN_TYPES = ("MODEL",)
30 FUNCTION = "patch"
31
32 CATEGORY = "_for_testing"
33
34 def patch(self, model, tile_size, swap_size, max_depth, scale_depth):
35 model_channels = model.model.model_config.unet_config["model_channels"]
36
37 apply_to = set()
38 temp = model_channels
39 for x in range(max_depth + 1):
40 apply_to.add(temp)
41 temp *= 2
42
43 latent_tile_size = max(32, tile_size) // 8
44 self.temp = None
45 self.counter = 1
46
47 def hypertile_in(q, k, v, extra_options):
48 if q.shape[-1] in apply_to:
49 shape = extra_options["original_shape"]
50 aspect_ratio = shape[-1] / shape[-2]
51
52 hw = q.size(1)
53 h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))
54
55 factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1
56 nh = random_divisor(h, latent_tile_size * factor, swap_size, self.counter)
57 self.counter += 1
58 nw = random_divisor(w, latent_tile_size * factor, swap_size, self.counter)
59 self.counter += 1
60
61 if nh * nw > 1:
62 q = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw)
63 self.temp = (nh, nw, h, w)
64 return q, k, v
65
66 return q, k, v
67 def hypertile_out(out, extra_options):
68 if self.temp is not None:
69 nh, nw, h, w = self.temp
70 self.temp = None
71 out = rearrange(out, "(b nh nw) hw c -> b nh nw hw c", nh=nh, nw=nw)
72 out = rearrange(out, "b nh nw (h w) c -> b (nh h nw w) c", h=h // nh, w=w // nw)
73 return out
74
75
76 m = model.clone()
77 m.set_model_attn1_patch(hypertile_in)
78 m.set_model_attn1_output_patch(hypertile_out)
79 return (m, )
80
81 NODE_CLASS_MAPPINGS = {
82 "HyperTile": HyperTile,
83 }
84
[end of comfy_extras/nodes_hypertile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/comfy_extras/nodes_hypertile.py b/comfy_extras/nodes_hypertile.py
--- a/comfy_extras/nodes_hypertile.py
+++ b/comfy_extras/nodes_hypertile.py
@@ -2,9 +2,10 @@
import math
from einops import rearrange
-import random
+# Use torch rng for consistency across generations
+from torch import randint
-def random_divisor(value: int, min_value: int, /, max_options: int = 1, counter = 0) -> int:
+def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int:
min_value = min(min_value, value)
# All big divisors of value (inclusive)
@@ -12,8 +13,7 @@
ns = [value // i for i in divisors[:max_options]] # has at least 1 element
- random.seed(counter)
- idx = random.randint(0, len(ns) - 1)
+ idx = randint(low=0, high=len(ns) - 1, size=(1,)).item()
return ns[idx]
@@ -42,7 +42,6 @@
latent_tile_size = max(32, tile_size) // 8
self.temp = None
- self.counter = 1
def hypertile_in(q, k, v, extra_options):
if q.shape[-1] in apply_to:
@@ -53,10 +52,8 @@
h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))
factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1
- nh = random_divisor(h, latent_tile_size * factor, swap_size, self.counter)
- self.counter += 1
- nw = random_divisor(w, latent_tile_size * factor, swap_size, self.counter)
- self.counter += 1
+ nh = random_divisor(h, latent_tile_size * factor, swap_size)
+ nw = random_divisor(w, latent_tile_size * factor, swap_size)
if nh * nw > 1:
q = rearrange(q, "b (nh h nw w) c -> (b nh nw) (h w) c", h=h // nh, w=w // nw, nh=nh, nw=nw)
| {"golden_diff": "diff --git a/comfy_extras/nodes_hypertile.py b/comfy_extras/nodes_hypertile.py\n--- a/comfy_extras/nodes_hypertile.py\n+++ b/comfy_extras/nodes_hypertile.py\n@@ -2,9 +2,10 @@\n \n import math\n from einops import rearrange\n-import random\n+# Use torch rng for consistency across generations\n+from torch import randint\n \n-def random_divisor(value: int, min_value: int, /, max_options: int = 1, counter = 0) -> int:\n+def random_divisor(value: int, min_value: int, /, max_options: int = 1) -> int:\n min_value = min(min_value, value)\n \n # All big divisors of value (inclusive)\n@@ -12,8 +13,7 @@\n \n ns = [value // i for i in divisors[:max_options]] # has at least 1 element\n \n- random.seed(counter)\n- idx = random.randint(0, len(ns) - 1)\n+ idx = randint(low=0, high=len(ns) - 1, size=(1,)).item()\n \n return ns[idx]\n \n@@ -42,7 +42,6 @@\n \n latent_tile_size = max(32, tile_size) // 8\n self.temp = None\n- self.counter = 1\n \n def hypertile_in(q, k, v, extra_options):\n if q.shape[-1] in apply_to:\n@@ -53,10 +52,8 @@\n h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))\n \n factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1\n- nh = random_divisor(h, latent_tile_size * factor, swap_size, self.counter)\n- self.counter += 1\n- nw = random_divisor(w, latent_tile_size * factor, swap_size, self.counter)\n- self.counter += 1\n+ nh = random_divisor(h, latent_tile_size * factor, swap_size)\n+ nw = random_divisor(w, latent_tile_size * factor, swap_size)\n \n if nh * nw > 1:\n q = rearrange(q, \"b (nh h nw w) c -> (b nh nw) (h w) c\", h=h // nh, w=w // nw, nh=nh, nw=nw)\n", "issue": "HyperTile node is nondeterministic across executions and messes with global randomness\nThe HyperTile node uses the random module and seeds the global random with its own counter variable.\r\n\r\nUnfortunately, this counter variable is retained across executions if the HyperTile parameters don't change, and so every execution will have different results.\r\n\r\nThe effect on global random can be avoided just by using a `random.Random()` instance instead of a counter, but since ComfyUI doesn't provide any kind of after-exec function for nodes, there doesn't seem to be a way to reset it to its initial state after one prompt is executed.\r\n\r\nI suppose you could work around this by setting having IS_CHANGED return something so that the node gets always executed, thus reinitializing randomness, but that might cause any nodes that come after the HyperTile node to needlessly re-execute.\r\n\r\n\n", "before_files": [{"content": "#Taken from: https://github.com/tfernd/HyperTile/\n\nimport math\nfrom einops import rearrange\nimport random\n\ndef random_divisor(value: int, min_value: int, /, max_options: int = 1, counter = 0) -> int:\n min_value = min(min_value, value)\n\n # All big divisors of value (inclusive)\n divisors = [i for i in range(min_value, value + 1) if value % i == 0]\n\n ns = [value // i for i in divisors[:max_options]] # has at least 1 element\n\n random.seed(counter)\n idx = random.randint(0, len(ns) - 1)\n\n return ns[idx]\n\nclass HyperTile:\n @classmethod\n def INPUT_TYPES(s):\n return {\"required\": { \"model\": (\"MODEL\",),\n \"tile_size\": (\"INT\", {\"default\": 256, \"min\": 1, \"max\": 2048}),\n \"swap_size\": (\"INT\", {\"default\": 2, \"min\": 1, \"max\": 128}),\n \"max_depth\": (\"INT\", {\"default\": 0, \"min\": 0, \"max\": 10}),\n \"scale_depth\": (\"BOOLEAN\", {\"default\": False}),\n }}\n RETURN_TYPES = (\"MODEL\",)\n FUNCTION 
= \"patch\"\n\n CATEGORY = \"_for_testing\"\n\n def patch(self, model, tile_size, swap_size, max_depth, scale_depth):\n model_channels = model.model.model_config.unet_config[\"model_channels\"]\n\n apply_to = set()\n temp = model_channels\n for x in range(max_depth + 1):\n apply_to.add(temp)\n temp *= 2\n\n latent_tile_size = max(32, tile_size) // 8\n self.temp = None\n self.counter = 1\n\n def hypertile_in(q, k, v, extra_options):\n if q.shape[-1] in apply_to:\n shape = extra_options[\"original_shape\"]\n aspect_ratio = shape[-1] / shape[-2]\n\n hw = q.size(1)\n h, w = round(math.sqrt(hw * aspect_ratio)), round(math.sqrt(hw / aspect_ratio))\n\n factor = 2**((q.shape[-1] // model_channels) - 1) if scale_depth else 1\n nh = random_divisor(h, latent_tile_size * factor, swap_size, self.counter)\n self.counter += 1\n nw = random_divisor(w, latent_tile_size * factor, swap_size, self.counter)\n self.counter += 1\n\n if nh * nw > 1:\n q = rearrange(q, \"b (nh h nw w) c -> (b nh nw) (h w) c\", h=h // nh, w=w // nw, nh=nh, nw=nw)\n self.temp = (nh, nw, h, w)\n return q, k, v\n\n return q, k, v\n def hypertile_out(out, extra_options):\n if self.temp is not None:\n nh, nw, h, w = self.temp\n self.temp = None\n out = rearrange(out, \"(b nh nw) hw c -> b nh nw hw c\", nh=nh, nw=nw)\n out = rearrange(out, \"b nh nw (h w) c -> b (nh h nw w) c\", h=h // nh, w=w // nw)\n return out\n\n\n m = model.clone()\n m.set_model_attn1_patch(hypertile_in)\n m.set_model_attn1_output_patch(hypertile_out)\n return (m, )\n\nNODE_CLASS_MAPPINGS = {\n \"HyperTile\": HyperTile,\n}\n", "path": "comfy_extras/nodes_hypertile.py"}]} | 1,703 | 549 |
gh_patches_debug_26976 | rasdani/github-patches | git_diff | neptune-ai__neptune-client-197 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
experiment.get_system_properties() doesn't return "hostname"
I think there is some regression. For recent experiments, `experiment.get_properties()` returns either an empty dictionary or `{'key1': 'value1', 'key2': '17', 'key3': 'other-value'}` (whatever that is) in the case of the sandbox project.
For older experiments, I can still get the properties. 
This is probably a backend issue but there is no better place to put it.
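For concreteness, a minimal check along the lines of this report might look as follows (a sketch only: `experiment` is assumed to be a handle already obtained from the client, and only the two methods named above are used):

```python
def check_experiment_properties(experiment):
    """Sketch of the check described in this report; `experiment` is an
    already-fetched experiment handle (how it is obtained is omitted)."""
    sys_props = experiment.get_system_properties()
    print("hostname" in sys_props)      # expected True; missing for recent experiments
    print(experiment.get_properties())  # {} or unrelated keys for recent experiments
```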
</issue>
<code>
[start of neptune/internal/streams/channel_writer.py]
1 #
2 # Copyright (c) 2019, Neptune Labs Sp. z o.o.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16
17 from __future__ import unicode_literals
18
19 import re
20 import time
21
22 from neptune.internal.channels.channels import ChannelNamespace, ChannelValue, ChannelType
23
24
25 class ChannelWriter(object):
26 __SPLIT_PATTERN = re.compile(r'[\n\r]{1,2}')
27
28 def __init__(self, experiment, channel_name, channel_namespace=ChannelNamespace.USER):
29 self.time_started_ms = time.time() * 1000
30 self._experiment = experiment
31 self._channel_name = channel_name
32 self._channel_namespace = channel_namespace
33 self._data = None
34
35 def write(self, data):
36 if self._data is None:
37 self._data = data
38 else:
39 self._data += data
40 lines = self.__SPLIT_PATTERN.split(self._data)
41 for line in lines[:-1]:
42 value = ChannelValue(
43 x=time.time() * 1000 - self.time_started_ms,
44 y=dict(text_value=str(line)),
45 ts=None
46 )
47 # pylint: disable=protected-access
48 self._experiment._channels_values_sender.send(
49 channel_name=self._channel_name,
50 channel_type=ChannelType.TEXT.value,
51 channel_value=value,
52 channel_namespace=self._channel_namespace
53 )
54
55 self._data = lines[-1]
56
[end of neptune/internal/streams/channel_writer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/neptune/internal/streams/channel_writer.py b/neptune/internal/streams/channel_writer.py
--- a/neptune/internal/streams/channel_writer.py
+++ b/neptune/internal/streams/channel_writer.py
@@ -16,8 +16,8 @@
from __future__ import unicode_literals
+from datetime import datetime
import re
-import time
from neptune.internal.channels.channels import ChannelNamespace, ChannelValue, ChannelType
@@ -26,7 +26,7 @@
__SPLIT_PATTERN = re.compile(r'[\n\r]{1,2}')
def __init__(self, experiment, channel_name, channel_namespace=ChannelNamespace.USER):
- self.time_started_ms = time.time() * 1000
+ self._time_started = experiment.get_system_properties()['created']
self._experiment = experiment
self._channel_name = channel_name
self._channel_namespace = channel_namespace
@@ -40,7 +40,7 @@
lines = self.__SPLIT_PATTERN.split(self._data)
for line in lines[:-1]:
value = ChannelValue(
- x=time.time() * 1000 - self.time_started_ms,
+ x=(datetime.now(tz=self._time_started.tzinfo) - self._time_started).total_seconds() * 1000,
y=dict(text_value=str(line)),
ts=None
)
| {"golden_diff": "diff --git a/neptune/internal/streams/channel_writer.py b/neptune/internal/streams/channel_writer.py\n--- a/neptune/internal/streams/channel_writer.py\n+++ b/neptune/internal/streams/channel_writer.py\n@@ -16,8 +16,8 @@\n \n from __future__ import unicode_literals\n \n+from datetime import datetime\n import re\n-import time\n \n from neptune.internal.channels.channels import ChannelNamespace, ChannelValue, ChannelType\n \n@@ -26,7 +26,7 @@\n __SPLIT_PATTERN = re.compile(r'[\\n\\r]{1,2}')\n \n def __init__(self, experiment, channel_name, channel_namespace=ChannelNamespace.USER):\n- self.time_started_ms = time.time() * 1000\n+ self._time_started = experiment.get_system_properties()['created']\n self._experiment = experiment\n self._channel_name = channel_name\n self._channel_namespace = channel_namespace\n@@ -40,7 +40,7 @@\n lines = self.__SPLIT_PATTERN.split(self._data)\n for line in lines[:-1]:\n value = ChannelValue(\n- x=time.time() * 1000 - self.time_started_ms,\n+ x=(datetime.now(tz=self._time_started.tzinfo) - self._time_started).total_seconds() * 1000,\n y=dict(text_value=str(line)),\n ts=None\n )\n", "issue": "experiment.get_system_properties() doesn't return \"hostname\"\nI think there is some regression. For recent experiments `experiment.get_properties()` return either an empty dictionary or `{'key1': 'value1', 'key2': '17', 'key3': 'other-value'}` (whatever that is) in case of the sandbox project.\r\n\r\nFor older experiments, I still can get the properties. \r\n\r\nThis is probably a backend issue but there is no better place to put it.\n", "before_files": [{"content": "#\n# Copyright (c) 2019, Neptune Labs Sp. z o.o.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import unicode_literals\n\nimport re\nimport time\n\nfrom neptune.internal.channels.channels import ChannelNamespace, ChannelValue, ChannelType\n\n\nclass ChannelWriter(object):\n __SPLIT_PATTERN = re.compile(r'[\\n\\r]{1,2}')\n\n def __init__(self, experiment, channel_name, channel_namespace=ChannelNamespace.USER):\n self.time_started_ms = time.time() * 1000\n self._experiment = experiment\n self._channel_name = channel_name\n self._channel_namespace = channel_namespace\n self._data = None\n\n def write(self, data):\n if self._data is None:\n self._data = data\n else:\n self._data += data\n lines = self.__SPLIT_PATTERN.split(self._data)\n for line in lines[:-1]:\n value = ChannelValue(\n x=time.time() * 1000 - self.time_started_ms,\n y=dict(text_value=str(line)),\n ts=None\n )\n # pylint: disable=protected-access\n self._experiment._channels_values_sender.send(\n channel_name=self._channel_name,\n channel_type=ChannelType.TEXT.value,\n channel_value=value,\n channel_namespace=self._channel_namespace\n )\n\n self._data = lines[-1]\n", "path": "neptune/internal/streams/channel_writer.py"}]} | 1,162 | 303 |
gh_patches_debug_16917 | rasdani/github-patches | git_diff | deeppavlov__DeepPavlov-100 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Files not found while running telegram bot
I ran the Telegram bot interface (copy-pasted from the readme)
```
python -m deeppavlov.deep interactbot deeppavlov/configs/go_bot/gobot_dstc2.json -t TELEGRAM_TOKEN
```
But the following error is raised:
```
File "/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/file.py", line 22, in read_json
with open(fpath) as fin:
FileNotFoundError: [Errno 2] No such file or directory: '../telegram_utils/models_info.json'
```
All files have been downloaded and successfully used in training.
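One way to make that lookup independent of the working directory (a sketch using the standard library; not necessarily the project's eventual fix) is to resolve the file relative to the module that ships it:

```python
from pathlib import Path

from deeppavlov.core.common.file import read_json

# Resolve models_info.json next to this module instead of relying on a
# relative path, which breaks when the working directory differs.
config_path = Path(__file__).parent / "models_info.json"
models_info = read_json(str(config_path))
```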
</issue>
<code>
[start of telegram_utils/telegram_ui.py]
1 """
2 Copyright 2017 Neural Networks and Deep Learning lab, MIPT
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15 """
16 import telebot
17
18 from deeppavlov.core.common.file import read_json
19 from deeppavlov.core.commands.infer import build_model_from_config
20
21
22 def init_bot_for_model(token, model):
23 bot = telebot.TeleBot(token)
24
25 model_name = type(model).__name__
26 models_info = read_json('../telegram_utils/models_info.json')
27 model_info = models_info[model_name] if model_name in models_info else models_info['@default']
28
29 @bot.message_handler(commands=['start'])
30 def send_start_message(message):
31 chat_id = message.chat.id
32 out_message = model_info['start_message']
33 if hasattr(model, 'reset'):
34 model.reset()
35 bot.send_message(chat_id, out_message)
36
37 @bot.message_handler(commands=['help'])
38 def send_help_message(message):
39 chat_id = message.chat.id
40 out_message = model_info['help_message']
41 bot.send_message(chat_id, out_message)
42
43 @bot.message_handler()
44 def handle_inference(message):
45 chat_id = message.chat.id
46 context = message.text
47
48 pred = model([context])
49 reply_message = str(pred[0])
50 bot.send_message(chat_id, reply_message)
51
52 bot.polling()
53
54
55 def interact_model_by_telegram(config_path, token):
56 config = read_json(config_path)
57 model = build_model_from_config(config)
58 init_bot_for_model(token, model)
59
[end of telegram_utils/telegram_ui.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/telegram_utils/telegram_ui.py b/telegram_utils/telegram_ui.py
--- a/telegram_utils/telegram_ui.py
+++ b/telegram_utils/telegram_ui.py
@@ -13,6 +13,8 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
+from pathlib import Path
+
import telebot
from deeppavlov.core.common.file import read_json
@@ -23,7 +25,8 @@
bot = telebot.TeleBot(token)
model_name = type(model).__name__
- models_info = read_json('../telegram_utils/models_info.json')
+ config_path = Path(__file__).parent / 'models_info.json'
+ models_info = read_json(str(config_path))
model_info = models_info[model_name] if model_name in models_info else models_info['@default']
@bot.message_handler(commands=['start'])
| {"golden_diff": "diff --git a/telegram_utils/telegram_ui.py b/telegram_utils/telegram_ui.py\n--- a/telegram_utils/telegram_ui.py\n+++ b/telegram_utils/telegram_ui.py\n@@ -13,6 +13,8 @@\n See the License for the specific language governing permissions and\n limitations under the License.\n \"\"\"\n+from pathlib import Path\n+\n import telebot\n \n from deeppavlov.core.common.file import read_json\n@@ -23,7 +25,8 @@\n bot = telebot.TeleBot(token)\n \n model_name = type(model).__name__\n- models_info = read_json('../telegram_utils/models_info.json')\n+ config_path = Path(__file__).parent / 'models_info.json'\n+ models_info = read_json(str(config_path))\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n \n @bot.message_handler(commands=['start'])\n", "issue": "Files not found while running telegram bot \nI run telegram bot interface (copy-paste from readme)\r\n```\r\npython -m deeppavlov.deep interactbot deeppavlov/configs/go_bot/gobot_dstc2.json -t TELEGRAM_TOKEN\r\n```\r\n\r\nBut the error is given:\r\n\r\n``` \r\nFile \"/home/ubuntu/work/ipavlov/DeepPavlov/deeppavlov/core/common/file.py\", line 22, in read_json\r\n with open(fpath) as fin:\r\nFileNotFoundError: [Errno 2] No such file or directory: '../telegram_utils/models_info.json' \r\n```\r\n\r\nAll files have been downloaded and successfully used in training.\n", "before_files": [{"content": "\"\"\"\nCopyright 2017 Neural Networks and Deep Learning lab, MIPT\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport telebot\n\nfrom deeppavlov.core.common.file import read_json\nfrom deeppavlov.core.commands.infer import build_model_from_config\n\n\ndef init_bot_for_model(token, model):\n bot = telebot.TeleBot(token)\n\n model_name = type(model).__name__\n models_info = read_json('../telegram_utils/models_info.json')\n model_info = models_info[model_name] if model_name in models_info else models_info['@default']\n\n @bot.message_handler(commands=['start'])\n def send_start_message(message):\n chat_id = message.chat.id\n out_message = model_info['start_message']\n if hasattr(model, 'reset'):\n model.reset()\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler(commands=['help'])\n def send_help_message(message):\n chat_id = message.chat.id\n out_message = model_info['help_message']\n bot.send_message(chat_id, out_message)\n\n @bot.message_handler()\n def handle_inference(message):\n chat_id = message.chat.id\n context = message.text\n\n pred = model([context])\n reply_message = str(pred[0])\n bot.send_message(chat_id, reply_message)\n\n bot.polling()\n\n\ndef interact_model_by_telegram(config_path, token):\n config = read_json(config_path)\n model = build_model_from_config(config)\n init_bot_for_model(token, model)\n", "path": "telegram_utils/telegram_ui.py"}]} | 1,214 | 197 |
gh_patches_debug_42952 | rasdani/github-patches | git_diff | ansible__ansible-lint-2832 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
no-changelog: check_collection_changelog
Check the collection structure to confirm whether a changelog file is present at the root of the directory.
The rule should be optional and only enabled at the user's discretion. The error level should be Warn/Info (0).
Based on code found [here](https://github.com/ansible/galaxy-importer/blob/master/galaxy_importer/loaders/collection.py#L119).
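A minimal sketch of such a check (candidate paths mirror the galaxy-importer loader linked above; the helper name is an assumption):

```python
import os

def has_changelog(base_path: str) -> bool:
    # Changelog locations accepted at the root of a collection.
    candidates = [
        os.path.join(base_path, "changelogs", "changelog.yaml"),
        os.path.join(base_path, "CHANGELOG.rst"),
        os.path.join(base_path, "CHANGELOG.md"),
    ]
    return any(os.path.isfile(path) for path in candidates)
```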
</issue>
<code>
[start of src/ansiblelint/rules/galaxy.py]
1 """Implementation of GalaxyRule."""
2 from __future__ import annotations
3
4 import sys
5 from functools import total_ordering
6 from typing import TYPE_CHECKING, Any
7
8 from ansiblelint.constants import LINE_NUMBER_KEY
9 from ansiblelint.errors import MatchError
10 from ansiblelint.rules import AnsibleLintRule
11
12 if TYPE_CHECKING:
13 from ansiblelint.file_utils import Lintable
14
15
16 class GalaxyRule(AnsibleLintRule):
17 """Rule for checking collection version is greater than 1.0.0."""
18
19 id = "galaxy"
20 description = "Confirm via galaxy.yml file if collection version is greater than or equal to 1.0.0"
21 severity = "MEDIUM"
22 tags = ["metadata", "opt-in", "experimental"]
23 version_added = "v6.6.0 (last update)"
24
25 def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:
26 """Return matches found for a specific play (entry in playbook)."""
27 if file.kind != "galaxy": # type: ignore
28 return []
29 if "version" not in data:
30 return [
31 self.create_matcherror(
32 message="galaxy.yaml should have version tag.",
33 linenumber=data[LINE_NUMBER_KEY],
34 tag="galaxy[version-missing]",
35 filename=file,
36 )
37 ]
38 version = data.get("version")
39 if Version(version) < Version("1.0.0"):
40 return [
41 self.create_matcherror(
42 message="collection version should be greater than or equal to 1.0.0",
43 # pylint: disable=protected-access
44 linenumber=version._line_number,
45 tag="galaxy[version-incorrect]",
46 filename=file,
47 )
48 ]
49 return []
50
51
52 @total_ordering
53 class Version:
54 """Simple class to compare arbitrary versions."""
55
56 def __init__(self, version_string: str):
57 """Construct a Version object."""
58 self.components = version_string.split(".")
59
60 def __eq__(self, other: object) -> bool:
61 """Implement equality comparison."""
62 other = _coerce(other)
63 if not isinstance(other, Version):
64 return NotImplemented
65
66 return self.components == other.components
67
68 def __lt__(self, other: Version) -> bool:
69 """Implement lower-than operation."""
70 other = _coerce(other)
71 if not isinstance(other, Version):
72 return NotImplemented
73
74 return self.components < other.components
75
76
77 def _coerce(other: object) -> Version:
78 if isinstance(other, str):
79 other = Version(other)
80 if isinstance(other, (int, float)):
81 other = Version(str(other))
82 if isinstance(other, Version):
83 return other
84 raise NotImplementedError(f"Unable to coerce object type {type(other)} to Version")
85
86
87 if "pytest" in sys.modules: # noqa: C901
88
89 from ansiblelint.rules import RulesCollection
90 from ansiblelint.runner import Runner
91
92 def test_galaxy_collection_version_positive() -> None:
93 """Positive test for collection version in galaxy."""
94 collection = RulesCollection()
95 collection.register(GalaxyRule())
96 success = "examples/collection/galaxy.yml"
97 good_runner = Runner(success, rules=collection)
98 assert [] == good_runner.run()
99
100 def test_galaxy_collection_version_negative() -> None:
101 """Negative test for collection version in galaxy."""
102 collection = RulesCollection()
103 collection.register(GalaxyRule())
104 failure = "examples/meta/galaxy.yml"
105 bad_runner = Runner(failure, rules=collection)
106 errs = bad_runner.run()
107 assert len(errs) == 1
108
[end of src/ansiblelint/rules/galaxy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/ansiblelint/rules/galaxy.py b/src/ansiblelint/rules/galaxy.py
--- a/src/ansiblelint/rules/galaxy.py
+++ b/src/ansiblelint/rules/galaxy.py
@@ -1,6 +1,7 @@
"""Implementation of GalaxyRule."""
from __future__ import annotations
+import os
import sys
from functools import total_ordering
from typing import TYPE_CHECKING, Any
@@ -14,10 +15,10 @@
class GalaxyRule(AnsibleLintRule):
- """Rule for checking collection version is greater than 1.0.0."""
+ """Rule for checking collection version is greater than 1.0.0 and checking for changelog."""
id = "galaxy"
- description = "Confirm via galaxy.yml file if collection version is greater than or equal to 1.0.0"
+ description = "Confirm via galaxy.yml file if collection version is greater than or equal to 1.0.0 and check for changelog."
severity = "MEDIUM"
tags = ["metadata", "opt-in", "experimental"]
version_added = "v6.6.0 (last update)"
@@ -26,18 +27,21 @@
"""Return matches found for a specific play (entry in playbook)."""
if file.kind != "galaxy": # type: ignore
return []
+
+ results = []
+
if "version" not in data:
- return [
+ results.append(
self.create_matcherror(
message="galaxy.yaml should have version tag.",
linenumber=data[LINE_NUMBER_KEY],
tag="galaxy[version-missing]",
filename=file,
)
- ]
+ )
version = data.get("version")
if Version(version) < Version("1.0.0"):
- return [
+ results.append(
self.create_matcherror(
message="collection version should be greater than or equal to 1.0.0",
# pylint: disable=protected-access
@@ -45,8 +49,33 @@
tag="galaxy[version-incorrect]",
filename=file,
)
- ]
- return []
+ )
+
+ # Changelog Check - building off Galaxy rule as there is no current way to check
+ # for a nonexistent file
+
+ base_path = os.path.split(str(file.abspath))[0]
+ changelog_found = 0
+ changelog_paths = [
+ os.path.join(base_path, "changelogs", "changelog.yaml"),
+ os.path.join(base_path, "CHANGELOG.rst"),
+ os.path.join(base_path, "CHANGELOG.md"),
+ ]
+
+ for path in changelog_paths:
+ if os.path.isfile(path):
+ changelog_found = 1
+
+ if not changelog_found:
+ results.append(
+ self.create_matcherror(
+ message="No changelog found. Please add a changelog file. Refer to the galaxy.md file for more info.",
+ tag="galaxy[no-changelog]",
+ filename=file,
+ )
+ )
+
+ return results
@total_ordering
@@ -105,3 +134,18 @@
bad_runner = Runner(failure, rules=collection)
errs = bad_runner.run()
assert len(errs) == 1
+
+ def test_changelog_present() -> None:
+ """Positive test for finding a changelog."""
+ collection = RulesCollection()
+ collection.register(GalaxyRule())
+ good_runner = Runner("examples/collection/galaxy.yml", rules=collection)
+ assert [] == good_runner.run()
+
+ def test_changelog_missing() -> None:
+ """Negative test for finding a changelog."""
+ collection = RulesCollection()
+ collection.register(GalaxyRule())
+ bad_runner = Runner("examples/no_changelog/galaxy.yml", rules=collection)
+ errs = bad_runner.run()
+ assert len(errs) == 1
| {"golden_diff": "diff --git a/src/ansiblelint/rules/galaxy.py b/src/ansiblelint/rules/galaxy.py\n--- a/src/ansiblelint/rules/galaxy.py\n+++ b/src/ansiblelint/rules/galaxy.py\n@@ -1,6 +1,7 @@\n \"\"\"Implementation of GalaxyRule.\"\"\"\n from __future__ import annotations\n \n+import os\n import sys\n from functools import total_ordering\n from typing import TYPE_CHECKING, Any\n@@ -14,10 +15,10 @@\n \n \n class GalaxyRule(AnsibleLintRule):\n- \"\"\"Rule for checking collection version is greater than 1.0.0.\"\"\"\n+ \"\"\"Rule for checking collection version is greater than 1.0.0 and checking for changelog.\"\"\"\n \n id = \"galaxy\"\n- description = \"Confirm via galaxy.yml file if collection version is greater than or equal to 1.0.0\"\n+ description = \"Confirm via galaxy.yml file if collection version is greater than or equal to 1.0.0 and check for changelog.\"\n severity = \"MEDIUM\"\n tags = [\"metadata\", \"opt-in\", \"experimental\"]\n version_added = \"v6.6.0 (last update)\"\n@@ -26,18 +27,21 @@\n \"\"\"Return matches found for a specific play (entry in playbook).\"\"\"\n if file.kind != \"galaxy\": # type: ignore\n return []\n+\n+ results = []\n+\n if \"version\" not in data:\n- return [\n+ results.append(\n self.create_matcherror(\n message=\"galaxy.yaml should have version tag.\",\n linenumber=data[LINE_NUMBER_KEY],\n tag=\"galaxy[version-missing]\",\n filename=file,\n )\n- ]\n+ )\n version = data.get(\"version\")\n if Version(version) < Version(\"1.0.0\"):\n- return [\n+ results.append(\n self.create_matcherror(\n message=\"collection version should be greater than or equal to 1.0.0\",\n # pylint: disable=protected-access\n@@ -45,8 +49,33 @@\n tag=\"galaxy[version-incorrect]\",\n filename=file,\n )\n- ]\n- return []\n+ )\n+\n+ # Changelog Check - building off Galaxy rule as there is no current way to check\n+ # for a nonexistent file\n+\n+ base_path = os.path.split(str(file.abspath))[0]\n+ changelog_found = 0\n+ changelog_paths = [\n+ os.path.join(base_path, \"changelogs\", \"changelog.yaml\"),\n+ os.path.join(base_path, \"CHANGELOG.rst\"),\n+ os.path.join(base_path, \"CHANGELOG.md\"),\n+ ]\n+\n+ for path in changelog_paths:\n+ if os.path.isfile(path):\n+ changelog_found = 1\n+\n+ if not changelog_found:\n+ results.append(\n+ self.create_matcherror(\n+ message=\"No changelog found. Please add a changelog file. Refer to the galaxy.md file for more info.\",\n+ tag=\"galaxy[no-changelog]\",\n+ filename=file,\n+ )\n+ )\n+\n+ return results\n \n \n @total_ordering\n@@ -105,3 +134,18 @@\n bad_runner = Runner(failure, rules=collection)\n errs = bad_runner.run()\n assert len(errs) == 1\n+\n+ def test_changelog_present() -> None:\n+ \"\"\"Positive test for finding a changelog.\"\"\"\n+ collection = RulesCollection()\n+ collection.register(GalaxyRule())\n+ good_runner = Runner(\"examples/collection/galaxy.yml\", rules=collection)\n+ assert [] == good_runner.run()\n+\n+ def test_changelog_missing() -> None:\n+ \"\"\"Negative test for finding a changelog.\"\"\"\n+ collection = RulesCollection()\n+ collection.register(GalaxyRule())\n+ bad_runner = Runner(\"examples/no_changelog/galaxy.yml\", rules=collection)\n+ errs = bad_runner.run()\n+ assert len(errs) == 1\n", "issue": "no-changelog: check_collection_changelog\ncheck the collection structure to confirm if there is a changelog file present at the root of the directory. \r\n\r\nrule should be optional and only enabled at users discretion. 
Error level should be Warn/Info (0)\r\n\r\nbased off of code found [here ](https://github.com/ansible/galaxy-importer/blob/master/galaxy_importer/loaders/collection.py#L119)\n", "before_files": [{"content": "\"\"\"Implementation of GalaxyRule.\"\"\"\nfrom __future__ import annotations\n\nimport sys\nfrom functools import total_ordering\nfrom typing import TYPE_CHECKING, Any\n\nfrom ansiblelint.constants import LINE_NUMBER_KEY\nfrom ansiblelint.errors import MatchError\nfrom ansiblelint.rules import AnsibleLintRule\n\nif TYPE_CHECKING:\n from ansiblelint.file_utils import Lintable\n\n\nclass GalaxyRule(AnsibleLintRule):\n \"\"\"Rule for checking collection version is greater than 1.0.0.\"\"\"\n\n id = \"galaxy\"\n description = \"Confirm via galaxy.yml file if collection version is greater than or equal to 1.0.0\"\n severity = \"MEDIUM\"\n tags = [\"metadata\", \"opt-in\", \"experimental\"]\n version_added = \"v6.6.0 (last update)\"\n\n def matchplay(self, file: Lintable, data: dict[str, Any]) -> list[MatchError]:\n \"\"\"Return matches found for a specific play (entry in playbook).\"\"\"\n if file.kind != \"galaxy\": # type: ignore\n return []\n if \"version\" not in data:\n return [\n self.create_matcherror(\n message=\"galaxy.yaml should have version tag.\",\n linenumber=data[LINE_NUMBER_KEY],\n tag=\"galaxy[version-missing]\",\n filename=file,\n )\n ]\n version = data.get(\"version\")\n if Version(version) < Version(\"1.0.0\"):\n return [\n self.create_matcherror(\n message=\"collection version should be greater than or equal to 1.0.0\",\n # pylint: disable=protected-access\n linenumber=version._line_number,\n tag=\"galaxy[version-incorrect]\",\n filename=file,\n )\n ]\n return []\n\n\n@total_ordering\nclass Version:\n \"\"\"Simple class to compare arbitrary versions.\"\"\"\n\n def __init__(self, version_string: str):\n \"\"\"Construct a Version object.\"\"\"\n self.components = version_string.split(\".\")\n\n def __eq__(self, other: object) -> bool:\n \"\"\"Implement equality comparison.\"\"\"\n other = _coerce(other)\n if not isinstance(other, Version):\n return NotImplemented\n\n return self.components == other.components\n\n def __lt__(self, other: Version) -> bool:\n \"\"\"Implement lower-than operation.\"\"\"\n other = _coerce(other)\n if not isinstance(other, Version):\n return NotImplemented\n\n return self.components < other.components\n\n\ndef _coerce(other: object) -> Version:\n if isinstance(other, str):\n other = Version(other)\n if isinstance(other, (int, float)):\n other = Version(str(other))\n if isinstance(other, Version):\n return other\n raise NotImplementedError(f\"Unable to coerce object type {type(other)} to Version\")\n\n\nif \"pytest\" in sys.modules: # noqa: C901\n\n from ansiblelint.rules import RulesCollection\n from ansiblelint.runner import Runner\n\n def test_galaxy_collection_version_positive() -> None:\n \"\"\"Positive test for collection version in galaxy.\"\"\"\n collection = RulesCollection()\n collection.register(GalaxyRule())\n success = \"examples/collection/galaxy.yml\"\n good_runner = Runner(success, rules=collection)\n assert [] == good_runner.run()\n\n def test_galaxy_collection_version_negative() -> None:\n \"\"\"Negative test for collection version in galaxy.\"\"\"\n collection = RulesCollection()\n collection.register(GalaxyRule())\n failure = \"examples/meta/galaxy.yml\"\n bad_runner = Runner(failure, rules=collection)\n errs = bad_runner.run()\n assert len(errs) == 1\n", "path": "src/ansiblelint/rules/galaxy.py"}]} | 1,632 | 889 |
gh_patches_debug_4648 | rasdani/github-patches | git_diff | kivy__kivy-2196 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[examples] installation of latest kivy-examples package fails
Looks like there is a simple syntax problem in an example...
I believe it is not Python 3 compatible :/
Package: kivy-examples (1.8.1-daily0+201405040547-2023-testing42~ubuntu14.04.1)
```
File "/usr/share/kivy-examples/widgets/compound_selection.py", line 17
print [x.text for x in self.selected_nodes]
^
SyntaxError: invalid syntax
```
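The offending statement is Python 2 syntax; under Python 3, `print` is a function, so the equivalent line is:

```python
# Python 3 form of the line quoted above (still referencing the same attribute):
print([x.text for x in self.selected_nodes])
```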
</issue>
<code>
[start of examples/widgets/compound_selection.py]
1 from kivy.uix.gridlayout import GridLayout
2 from kivy.uix.button import Button
3 from kivy.uix.behaviors import CompoundSelectionBehavior
4 from kivy.app import runTouchApp
5 from kivy.core.window import Window
6
7
8 class SelectableGrid(CompoundSelectionBehavior, GridLayout):
9
10 def __init__(self, **kwargs):
11 super(SelectableGrid, self).__init__(**kwargs)
12 keyboard = Window.request_keyboard(None, self)
13 keyboard.bind(on_key_down=self.select_with_key_down,
14 on_key_up=self.select_with_key_up)
15
16 def print_selection(*l):
17 print(x.text for x in self.selected_nodes)
18 self.bind(selected_nodes=print_selection)
19
20 def goto_node(self, key, last_node, last_node_idx):
21 ''' This function is used to go to the node by typing the number
22 of the text of the button.
23 '''
24 node, idx = super(SelectableGrid, self).goto_node(key, last_node,
25 last_node_idx)
26 if node == last_node:
27 children = self.children
28 for i in range(len(children)):
29 if children[i].text == key:
30 return children[i], i
31 return node, idx
32
33 def select_node(self, node):
34 node.background_color = (1, 0, 0, 1)
35 return super(SelectableGrid, self).select_node(node)
36
37 def deselect_node(self, node):
38 node.background_color = (1, 1, 1, 1)
39 super(SelectableGrid, self).deselect_node(node)
40
41 def do_touch(self, instance, touch):
42 if ('button' in touch.profile and touch.button in
43 ('scrollup', 'scrolldown', 'scrollleft', 'scrollright')) or\
44 instance.collide_point(*touch.pos):
45 self.select_with_touch(instance, touch)
46 else:
47 return False
48 return True
49
50
51 root = SelectableGrid(cols=5, up_count=5, multiselect=True, scroll_count=1)
52 for i in range(40):
53 c = Button(text=str(i))
54 c.bind(on_touch_down=root.do_touch)
55 root.add_widget(c)
56
57 runTouchApp(root)
58
[end of examples/widgets/compound_selection.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/widgets/compound_selection.py b/examples/widgets/compound_selection.py
--- a/examples/widgets/compound_selection.py
+++ b/examples/widgets/compound_selection.py
@@ -14,7 +14,7 @@
on_key_up=self.select_with_key_up)
def print_selection(*l):
- print(x.text for x in self.selected_nodes)
+ print([x.text for x in self.selected_nodes])
self.bind(selected_nodes=print_selection)
def goto_node(self, key, last_node, last_node_idx):
| {"golden_diff": "diff --git a/examples/widgets/compound_selection.py b/examples/widgets/compound_selection.py\n--- a/examples/widgets/compound_selection.py\n+++ b/examples/widgets/compound_selection.py\n@@ -14,7 +14,7 @@\n on_key_up=self.select_with_key_up)\n \n def print_selection(*l):\n- print(x.text for x in self.selected_nodes)\n+ print([x.text for x in self.selected_nodes])\n self.bind(selected_nodes=print_selection)\n \n def goto_node(self, key, last_node, last_node_idx):\n", "issue": "[examples] installation of latest kivy-examples package fails\nLooks like there is a simple syntax problem in an example...\nBeleave it is not py3 compatible :/\n\nPackage: kivy-examples (1.8.1-daily0+201405040547-2023-testing42~ubuntu14.04.1)\n\n```\n File \"/usr/share/kivy-examples/widgets/compound_selection.py\", line 17\n print [x.text for x in self.selected_nodes]\n ^\nSyntaxError: invalid syntax\n```\n\n", "before_files": [{"content": "from kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.button import Button\nfrom kivy.uix.behaviors import CompoundSelectionBehavior\nfrom kivy.app import runTouchApp\nfrom kivy.core.window import Window\n\n\nclass SelectableGrid(CompoundSelectionBehavior, GridLayout):\n\n def __init__(self, **kwargs):\n super(SelectableGrid, self).__init__(**kwargs)\n keyboard = Window.request_keyboard(None, self)\n keyboard.bind(on_key_down=self.select_with_key_down,\n on_key_up=self.select_with_key_up)\n\n def print_selection(*l):\n print(x.text for x in self.selected_nodes)\n self.bind(selected_nodes=print_selection)\n\n def goto_node(self, key, last_node, last_node_idx):\n ''' This function is used to go to the node by typing the number\n of the text of the button.\n '''\n node, idx = super(SelectableGrid, self).goto_node(key, last_node,\n last_node_idx)\n if node == last_node:\n children = self.children\n for i in range(len(children)):\n if children[i].text == key:\n return children[i], i\n return node, idx\n\n def select_node(self, node):\n node.background_color = (1, 0, 0, 1)\n return super(SelectableGrid, self).select_node(node)\n\n def deselect_node(self, node):\n node.background_color = (1, 1, 1, 1)\n super(SelectableGrid, self).deselect_node(node)\n\n def do_touch(self, instance, touch):\n if ('button' in touch.profile and touch.button in\n ('scrollup', 'scrolldown', 'scrollleft', 'scrollright')) or\\\n instance.collide_point(*touch.pos):\n self.select_with_touch(instance, touch)\n else:\n return False\n return True\n\n\nroot = SelectableGrid(cols=5, up_count=5, multiselect=True, scroll_count=1)\nfor i in range(40):\n c = Button(text=str(i))\n c.bind(on_touch_down=root.do_touch)\n root.add_widget(c)\n\nrunTouchApp(root)\n", "path": "examples/widgets/compound_selection.py"}]} | 1,239 | 117 |
gh_patches_debug_24461 | rasdani/github-patches | git_diff | pytorch__TensorRT-2375 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
🐛 [Bug] Issue in `ConstantFolder` where certain operators no longer exist in latest nightly
## Bug Description
[See here](https://github.com/pytorch/pytorch/blob/4b881b0da390c1290bb12850ef9daad6f6eb2cb6/torch/_inductor/constant_folding.py#L53-L63)
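The linked region appears to be `ConstantFolder.is_impure`, whose operator lookups are what fail; a local workaround (a sketch under that assumption, not an endorsed fix) is to subclass and bypass that check:

```python
import torch
from torch._inductor.constant_folding import ConstantFolder

class PatchedConstantFolder(ConstantFolder):
    """Sketch: skip the purity check whose operator lookups break on
    recent nightlies (assumption based on the lines linked above)."""

    def is_impure(self, node: torch.fx.node.Node) -> bool:
        return False
```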
## To Reproduce
See recent PR
## Expected behavior
These tests should not fail.
## Environment
> Build information about Torch-TensorRT can be found by turning on debug messages
- Torch-TensorRT Version (e.g. 1.0.0): https://github.com/pytorch/TensorRT/commit/0ef47c78ce94f610ee2a3a5a62e6fb450d4a8b34
- PyTorch Version (e.g. 1.0): `2.2.0.dev20231009+cu121`
</issue>
<code>
[start of py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py]
1 import logging
2 from typing import Sequence
3
4 import torch
5 from torch_tensorrt._utils import sanitized_torch_version
6 from torch_tensorrt.dynamo.lowering.passes.pass_utils import (
7 clean_up_graph_after_modifications,
8 )
9
10 from packaging import version
11
12 # Modify import location of utilities based on Torch version
13 if version.parse(sanitized_torch_version()) < version.parse("2.1.1"):
14 from torch._inductor.freezing import ConstantFolder, replace_node_with_constant
15 else:
16 from torch._inductor.constant_folding import (
17 ConstantFolder,
18 replace_node_with_constant,
19 )
20
21 logger = logging.getLogger(__name__)
22
23
24 @torch.utils._python_dispatch._disable_current_modes() # type: ignore
25 def constant_fold(
26 gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor]
27 ) -> torch.fx.GraphModule:
28 """Adapted from:
29 https://github.com/pytorch/pytorch/blob/3a79621c9dce17f77fbddc06aab21f6bc477f313/torch/_inductor/freezing.py#L178-L197
30
31 Folds constants in the graph module, not skipping constructors
32
33 Modifies the graph in-place and replaces node with constants
34 """
35 cf = ConstantFolder(gm, skip_constructors=False)
36 cf.run()
37
38 for node, constant in cf.node_replacements.items():
39 replace_node_with_constant(gm, node, constant)
40
41 erased_params = []
42 for node in gm.graph.nodes:
43 # If get_attr node has no users, mark it for deletion
44 if node.op == "get_attr" and len(node.users) == 0:
45 # If the node's parameter is not a parameter of any other node, remove it
46 if not any(
47 other.target == node.target for other in gm.graph.nodes if other != node
48 ):
49 delattr(gm, node.target)
50 erased_params.append(node)
51
52 # Remove unused nodes from the graph
53 for node in erased_params:
54 gm.graph.erase_node(node)
55
56 gm = clean_up_graph_after_modifications(gm)
57
58 logger.debug(f"Graph after constant folding:\n{gm.graph}")
59
60 return gm
61
[end of py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py b/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py
--- a/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py
+++ b/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py
@@ -1,5 +1,5 @@
import logging
-from typing import Sequence
+from typing import Any, Sequence
import torch
from torch_tensorrt._utils import sanitized_torch_version
@@ -32,7 +32,7 @@
Modifies the graph in-place and replaces node with constants
"""
- cf = ConstantFolder(gm, skip_constructors=False)
+ cf = _TorchTensorRTConstantFolder(gm, skip_constructors=False)
cf.run()
for node, constant in cf.node_replacements.items():
@@ -58,3 +58,14 @@
logger.debug(f"Graph after constant folding:\n{gm.graph}")
return gm
+
+
+# TODO: Delete this class when the following code is fixed in nightly:
+# https://github.com/pytorch/pytorch/blob/4b881b0da390c1290bb12850ef9daad6f6eb2cb6/torch/_inductor/constant_folding.py#L53-L63
+class _TorchTensorRTConstantFolder(ConstantFolder): # type: ignore[misc]
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+ # TODO: Update this function when quantization is added
+ def is_impure(self, node: torch.fx.node.Node) -> bool:
+ return False
| {"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py b/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py\n--- a/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py\n+++ b/py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py\n@@ -1,5 +1,5 @@\n import logging\n-from typing import Sequence\n+from typing import Any, Sequence\n \n import torch\n from torch_tensorrt._utils import sanitized_torch_version\n@@ -32,7 +32,7 @@\n \n Modifies the graph in-place and replaces node with constants\n \"\"\"\n- cf = ConstantFolder(gm, skip_constructors=False)\n+ cf = _TorchTensorRTConstantFolder(gm, skip_constructors=False)\n cf.run()\n \n for node, constant in cf.node_replacements.items():\n@@ -58,3 +58,14 @@\n logger.debug(f\"Graph after constant folding:\\n{gm.graph}\")\n \n return gm\n+\n+\n+# TODO: Delete this class when the following code is fixed in nightly:\n+# https://github.com/pytorch/pytorch/blob/4b881b0da390c1290bb12850ef9daad6f6eb2cb6/torch/_inductor/constant_folding.py#L53-L63\n+class _TorchTensorRTConstantFolder(ConstantFolder): # type: ignore[misc]\n+ def __init__(self, *args: Any, **kwargs: Any) -> None:\n+ super().__init__(*args, **kwargs)\n+\n+ # TODO: Update this function when quantization is added\n+ def is_impure(self, node: torch.fx.node.Node) -> bool:\n+ return False\n", "issue": "\ud83d\udc1b [Bug] Issue in `ConstantFolder` where certain operators no longer exist in latest nightly\n## Bug Description\r\n\r\n[See here](https://github.com/pytorch/pytorch/blob/4b881b0da390c1290bb12850ef9daad6f6eb2cb6/torch/_inductor/constant_folding.py#L53-L63)\r\n\r\n## To Reproduce\r\n\r\nSee recent PR \r\n\r\n## Expected behavior\r\n\r\nThese tests should not fail.\r\n\r\n## Environment\r\n\r\n> Build information about Torch-TensorRT can be found by turning on debug messages\r\n\r\n - Torch-TensorRT Version (e.g. 1.0.0): https://github.com/pytorch/TensorRT/commit/0ef47c78ce94f610ee2a3a5a62e6fb450d4a8b34\r\n - PyTorch Version (e.g. 
1.0): `2.2.0.dev20231009+cu121`\r\n\n", "before_files": [{"content": "import logging\nfrom typing import Sequence\n\nimport torch\nfrom torch_tensorrt._utils import sanitized_torch_version\nfrom torch_tensorrt.dynamo.lowering.passes.pass_utils import (\n clean_up_graph_after_modifications,\n)\n\nfrom packaging import version\n\n# Modify import location of utilities based on Torch version\nif version.parse(sanitized_torch_version()) < version.parse(\"2.1.1\"):\n from torch._inductor.freezing import ConstantFolder, replace_node_with_constant\nelse:\n from torch._inductor.constant_folding import (\n ConstantFolder,\n replace_node_with_constant,\n )\n\nlogger = logging.getLogger(__name__)\n\n\[email protected]._python_dispatch._disable_current_modes() # type: ignore\ndef constant_fold(\n gm: torch.fx.GraphModule, sample_inputs: Sequence[torch.Tensor]\n) -> torch.fx.GraphModule:\n \"\"\"Adapted from:\n https://github.com/pytorch/pytorch/blob/3a79621c9dce17f77fbddc06aab21f6bc477f313/torch/_inductor/freezing.py#L178-L197\n\n Folds constants in the graph module, not skipping constructors\n\n Modifies the graph in-place and replaces node with constants\n \"\"\"\n cf = ConstantFolder(gm, skip_constructors=False)\n cf.run()\n\n for node, constant in cf.node_replacements.items():\n replace_node_with_constant(gm, node, constant)\n\n erased_params = []\n for node in gm.graph.nodes:\n # If get_attr node has no users, mark it for deletion\n if node.op == \"get_attr\" and len(node.users) == 0:\n # If the node's parameter is not a parameter of any other node, remove it\n if not any(\n other.target == node.target for other in gm.graph.nodes if other != node\n ):\n delattr(gm, node.target)\n erased_params.append(node)\n\n # Remove unused nodes from the graph\n for node in erased_params:\n gm.graph.erase_node(node)\n\n gm = clean_up_graph_after_modifications(gm)\n\n logger.debug(f\"Graph after constant folding:\\n{gm.graph}\")\n\n return gm\n", "path": "py/torch_tensorrt/dynamo/lowering/passes/constant_folding.py"}]} | 1,389 | 398 |
gh_patches_debug_26365 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1396 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update error message on check availability section in the request form
### Issue description
To keep language consistent, we should update the error message in the check availability section in the request form from:
This
> That domain isn't available. Try entering another one. Contact us if you need help coming up with a domain.
To this
> That domain isn’t available. [Read more about choosing your .gov domain.](https://beta.get.gov/domains/choosing/)
Note that the hyperlink in the new message should open in a new tab so as to not take the user our of the request flow
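As a sketch of how the new message could carry the link (Django's `mark_safe` plus `target="_blank"`; the exact markup is an illustrative assumption, not the final implementation):

```python
from django.utils.safestring import mark_safe

# Sketch only; anchor markup is an assumption, the URL comes from this issue.
UNAVAILABLE_MESSAGE = mark_safe(
    "That domain isn’t available. "
    '<a href="https://beta.get.gov/domains/choosing/" target="_blank">'
    "Read more about choosing your .gov domain.</a>"
)
```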
### Acceptance criteria
- [ ] error message is updated
- [ ] link opens in new tab
### Additional context
_No response_
### Links to other issues
_No response_
</issue>
<code>
[start of src/api/views.py]
1 """Internal API views"""
2 from django.apps import apps
3 from django.views.decorators.http import require_http_methods
4 from django.http import JsonResponse
5
6 import requests
7
8 from login_required import login_not_required
9
10 from cachetools.func import ttl_cache
11
12
13 DOMAIN_FILE_URL = "https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv"
14
15
16 DOMAIN_API_MESSAGES = {
17 "required": "Enter the .gov domain you want. Don’t include “www” or “.gov.”"
18 " For example, if you want www.city.gov, you would enter “city”"
19 " (without the quotes).",
20 "extra_dots": "Enter the .gov domain you want without any periods.",
21 "unavailable": "That domain isn’t available. Try entering another one."
22 " Contact us if you need help coming up with a domain.",
23 "invalid": "Enter a domain using only letters, numbers, or hyphens (though we don't recommend using hyphens).",
24 "success": "That domain is available!",
25 "error": "Error finding domain availability.",
26 }
27
28
29 # this file doesn't change that often, nor is it that big, so cache the result
30 # in memory for ten minutes
31 @ttl_cache(ttl=600)
32 def _domains():
33 """Return a list of the current .gov domains.
34
35 Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,
36 lowercase everything and return the list.
37 """
38 DraftDomain = apps.get_model("registrar.DraftDomain")
39 # 5 second timeout
40 file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text
41 domains = set()
42 # skip the first line
43 for line in file_contents.splitlines()[1:]:
44 # get the domain before the first comma
45 domain = line.split(",", 1)[0]
46 # sanity-check the string we got from the file here
47 if DraftDomain.string_could_be_domain(domain):
48 # lowercase everything when we put it in domains
49 domains.add(domain.lower())
50 return domains
51
52
53 def check_domain_available(domain):
54 """Return true if the given domain is available.
55
56 The given domain is lowercased to match against the domains list. If the
57 given domain doesn't end with .gov, ".gov" is added when looking for
58 a match.
59 """
60 Domain = apps.get_model("registrar.Domain")
61 try:
62 if domain.endswith(".gov"):
63 return Domain.available(domain)
64 else:
65 # domain search string doesn't end with .gov, add it on here
66 return Domain.available(domain + ".gov")
67 except Exception:
68 return False
69
70
71 @require_http_methods(["GET"])
72 @login_not_required
73 def available(request, domain=""):
74 """Is a given domain available or not.
75
76 Response is a JSON dictionary with the key "available" and value true or
77 false.
78 """
79 DraftDomain = apps.get_model("registrar.DraftDomain")
80 # validate that the given domain could be a domain name and fail early if
81 # not.
82 if not (DraftDomain.string_could_be_domain(domain) or DraftDomain.string_could_be_domain(domain + ".gov")):
83 return JsonResponse({"available": False, "message": DOMAIN_API_MESSAGES["invalid"]})
84 # a domain is available if it is NOT in the list of current domains
85 try:
86 if check_domain_available(domain):
87 return JsonResponse({"available": True, "message": DOMAIN_API_MESSAGES["success"]})
88 else:
89 return JsonResponse({"available": False, "message": DOMAIN_API_MESSAGES["unavailable"]})
90 except Exception:
91 return JsonResponse({"available": False, "message": DOMAIN_API_MESSAGES["error"]})
92
[end of src/api/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/api/views.py b/src/api/views.py
--- a/src/api/views.py
+++ b/src/api/views.py
@@ -2,6 +2,9 @@
from django.apps import apps
from django.views.decorators.http import require_http_methods
from django.http import JsonResponse
+from django.utils.safestring import mark_safe
+
+from registrar.templatetags.url_helpers import public_site_url
import requests
@@ -18,8 +21,13 @@
" For example, if you want www.city.gov, you would enter “city”"
" (without the quotes).",
"extra_dots": "Enter the .gov domain you want without any periods.",
- "unavailable": "That domain isn’t available. Try entering another one."
- " Contact us if you need help coming up with a domain.",
+ # message below is considered safe; no user input can be inserted into the message
+ # body; public_site_url() function reads from local app settings and therefore safe
+ "unavailable": mark_safe( # nosec
+ "That domain isn’t available. "
+ "<a class='usa-link' href='{}' target='_blank'>"
+ "Read more about choosing your .gov domain.</a>".format(public_site_url("domains/choosing"))
+ ),
"invalid": "Enter a domain using only letters, numbers, or hyphens (though we don't recommend using hyphens).",
"success": "That domain is available!",
"error": "Error finding domain availability.",
| {"golden_diff": "diff --git a/src/api/views.py b/src/api/views.py\n--- a/src/api/views.py\n+++ b/src/api/views.py\n@@ -2,6 +2,9 @@\n from django.apps import apps\n from django.views.decorators.http import require_http_methods\n from django.http import JsonResponse\n+from django.utils.safestring import mark_safe\n+\n+from registrar.templatetags.url_helpers import public_site_url\n \n import requests\n \n@@ -18,8 +21,13 @@\n \" For example, if you want www.city.gov, you would enter \u201ccity\u201d\"\n \" (without the quotes).\",\n \"extra_dots\": \"Enter the .gov domain you want without any periods.\",\n- \"unavailable\": \"That domain isn\u2019t available. Try entering another one.\"\n- \" Contact us if you need help coming up with a domain.\",\n+ # message below is considered safe; no user input can be inserted into the message\n+ # body; public_site_url() function reads from local app settings and therefore safe\n+ \"unavailable\": mark_safe( # nosec\n+ \"That domain isn\u2019t available. \"\n+ \"<a class='usa-link' href='{}' target='_blank'>\"\n+ \"Read more about choosing your .gov domain.</a>\".format(public_site_url(\"domains/choosing\"))\n+ ),\n \"invalid\": \"Enter a domain using only letters, numbers, or hyphens (though we don't recommend using hyphens).\",\n \"success\": \"That domain is available!\",\n \"error\": \"Error finding domain availability.\",\n", "issue": "Update error message on check availability section in the request form\n### Issue description\n\nTo keep language consistent, we should update the error message in the check availability section in the request form from:\r\n\r\nThis\r\n\r\n> That domain isn't available. Try entering another one. Contact us if you need help coming up with a domain. \r\n\r\n\r\nTo this\r\n\r\n> That domain isn\u2019t available. [Read more about choosing your .gov domain.](https://beta.get.gov/domains/choosing/) \r\n\r\n\r\nNote that the hyperlink in the new message should open in a new tab so as to not take the user our of the request flow\n\n### Acceptance criteria\n\n- [ ] error message is updated\r\n- [ ] link opens in new tab\n\n### Additional context\n\n_No response_\n\n### Links to other issues\n\n_No response_\n", "before_files": [{"content": "\"\"\"Internal API views\"\"\"\nfrom django.apps import apps\nfrom django.views.decorators.http import require_http_methods\nfrom django.http import JsonResponse\n\nimport requests\n\nfrom login_required import login_not_required\n\nfrom cachetools.func import ttl_cache\n\n\nDOMAIN_FILE_URL = \"https://raw.githubusercontent.com/cisagov/dotgov-data/main/current-full.csv\"\n\n\nDOMAIN_API_MESSAGES = {\n \"required\": \"Enter the .gov domain you want. Don\u2019t include \u201cwww\u201d or \u201c.gov.\u201d\"\n \" For example, if you want www.city.gov, you would enter \u201ccity\u201d\"\n \" (without the quotes).\",\n \"extra_dots\": \"Enter the .gov domain you want without any periods.\",\n \"unavailable\": \"That domain isn\u2019t available. 
Try entering another one.\"\n \" Contact us if you need help coming up with a domain.\",\n \"invalid\": \"Enter a domain using only letters, numbers, or hyphens (though we don't recommend using hyphens).\",\n \"success\": \"That domain is available!\",\n \"error\": \"Error finding domain availability.\",\n}\n\n\n# this file doesn't change that often, nor is it that big, so cache the result\n# in memory for ten minutes\n@ttl_cache(ttl=600)\ndef _domains():\n \"\"\"Return a list of the current .gov domains.\n\n Fetch a file from DOMAIN_FILE_URL, parse the CSV for the domain,\n lowercase everything and return the list.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # 5 second timeout\n file_contents = requests.get(DOMAIN_FILE_URL, timeout=5).text\n domains = set()\n # skip the first line\n for line in file_contents.splitlines()[1:]:\n # get the domain before the first comma\n domain = line.split(\",\", 1)[0]\n # sanity-check the string we got from the file here\n if DraftDomain.string_could_be_domain(domain):\n # lowercase everything when we put it in domains\n domains.add(domain.lower())\n return domains\n\n\ndef check_domain_available(domain):\n \"\"\"Return true if the given domain is available.\n\n The given domain is lowercased to match against the domains list. If the\n given domain doesn't end with .gov, \".gov\" is added when looking for\n a match.\n \"\"\"\n Domain = apps.get_model(\"registrar.Domain\")\n try:\n if domain.endswith(\".gov\"):\n return Domain.available(domain)\n else:\n # domain search string doesn't end with .gov, add it on here\n return Domain.available(domain + \".gov\")\n except Exception:\n return False\n\n\n@require_http_methods([\"GET\"])\n@login_not_required\ndef available(request, domain=\"\"):\n \"\"\"Is a given domain available or not.\n\n Response is a JSON dictionary with the key \"available\" and value true or\n false.\n \"\"\"\n DraftDomain = apps.get_model(\"registrar.DraftDomain\")\n # validate that the given domain could be a domain name and fail early if\n # not.\n if not (DraftDomain.string_could_be_domain(domain) or DraftDomain.string_could_be_domain(domain + \".gov\")):\n return JsonResponse({\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"invalid\"]})\n # a domain is available if it is NOT in the list of current domains\n try:\n if check_domain_available(domain):\n return JsonResponse({\"available\": True, \"message\": DOMAIN_API_MESSAGES[\"success\"]})\n else:\n return JsonResponse({\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"unavailable\"]})\n except Exception:\n return JsonResponse({\"available\": False, \"message\": DOMAIN_API_MESSAGES[\"error\"]})\n", "path": "src/api/views.py"}]} | 1,667 | 339 |
gh_patches_debug_34044 | rasdani/github-patches | git_diff | TileDB-Inc__TileDB-Py-246 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
likely race condition in DenseArray.__new__
The test program below exposes (what I believe is) a race in the class handling in DenseArray. The test program is completely artificial - it just spins up 5 threads, each of which opens an S3 object and asks for its shape. Without a sleep to sequence their execution, it fails inside the DenseArray.__new__ method.
Running with the sleep statement in worker() enabled, I see:
```
python cctest.py
starting... 0
starting... 1
working... 0
ending... 0 (1305994, 972)
starting... 2
working... 1
ending... 1 (1305994, 972)
starting... 3
working... 2
ending... 2 (1305994, 972)
starting... 4
working... 3
ending... 3 (1305994, 972)
working... 4
ending... 4 (1305994, 972)
all done [(1305994, 972), (1305994, 972), (1305994, 972), (1305994, 972), (1305994, 972)]
```
Running with the sleep in worker() removed/commented out:
```
$ python cctest.py
starting... 0
starting... 1
starting... 2
working... 0
ending... 0 (1305994, 972)
starting... 3
working... 2
ending... 2 (1305994, 972)
starting... 4
working... 3
ending... 3 (1305994, 972)
working... 4
ending... 4 (1305994, 972)
Traceback (most recent call last):
File "cctest.py", line 33, in <module>
main()
File "cctest.py", line 27, in main
res = [f.result() for f in concurrent.futures.as_completed(futures)]
File "cctest.py", line 27, in <listcomp>
res = [f.result() for f in concurrent.futures.as_completed(futures)]
File "/usr/lib64/python3.7/concurrent/futures/_base.py", line 428, in result
return self.__get_result()
File "/usr/lib64/python3.7/concurrent/futures/_base.py", line 384, in __get_result
raise self._exception
File "/usr/lib64/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "cctest.py", line 11, in worker
with tiledb.DenseArray(uri) as A:
File "/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/array.py", line 10, in __new__
DenseArray.__bases__ = DenseArray.__bases__ + (cloudarray.CloudArray,)
TypeError: duplicate base class CloudArray
```
Test case:
```
import tiledb
import time
import numpy as np
import concurrent.futures
def worker(n, uri):
print('starting...', n)
# time.sleep(n)
with tiledb.DenseArray(uri) as A:
print('working...', n)
res = A.shape
print('ending...', n, res)
return res
def main():
executor_cls = concurrent.futures.ThreadPoolExecutor
uri = "s3://tiledb-eval/10x_mouse_13MM_processed-Xr3zstd.cxg/X"
with executor_cls(max_workers=2) as executor:
futures = [executor.submit(worker, n, uri) for n in range(0, 5)]
res = [f.result() for f in concurrent.futures.as_completed(futures)]
print('all done', res)
if __name__ == '__main__':
main()
```
</issue>
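A race like this is a classic check-then-act hazard: two threads can both observe `_mixin_init == False` before either one has appended `CloudArray` to `__bases__`. A minimal, self-contained sketch of one thread-safe variant, using double-checked locking (this is only one possible approach; setting the flag before the import also closes the window):

```python
import threading

class Base:
    pass

class CloudMixin:
    pass

class Array(Base):
    _mixin_init = False
    _mixin_lock = threading.Lock()

    def __new__(cls, *args, **kwargs):
        if not cls._mixin_init:                  # fast path, unsynchronized
            with cls._mixin_lock:
                if not cls._mixin_init:          # re-check under the lock
                    cls._mixin_init = True       # set before mutating __bases__
                    Array.__bases__ = Array.__bases__ + (CloudMixin,)
        return super().__new__(cls)

def worker(n):
    Array()                                      # many threads race to construct

threads = [threading.Thread(target=worker, args=(i,)) for i in range(8)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(Array.__mro__)                             # CloudMixin appears exactly once
```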
<code>
[start of tiledb/array.py]
1 from .libtiledb import DenseArrayImpl, SparseArrayImpl
2
3 class DenseArray(DenseArrayImpl):
4 _mixin_init = False
5
6 def __new__(cls, *args, **kwargs):
7 if not cls._mixin_init:
8 try:
9 from tiledb.cloud import cloudarray
10 DenseArray.__bases__ = DenseArray.__bases__ + (cloudarray.CloudArray,)
11 except ImportError:
12 pass
13 finally:
14 cls._mixin_init = True
15
16 obj = super(DenseArray, cls).__new__(cls, *args, **kwargs)
17 return obj
18
19 class SparseArray(SparseArrayImpl):
20 _mixin_init = False
21
22 def __new__(cls, *args, **kwargs):
23 if not cls._mixin_init:
24 try:
25 from tiledb.cloud import cloudarray
26 SparseArray.__bases__ = SparseArray.__bases__ + (cloudarray.CloudArray,)
27 except ImportError:
28 pass
29 finally:
30 cls._mixin_init = True
31
32 obj = super(SparseArray, cls).__new__(cls, *args, **kwargs)
33 return obj
34
[end of tiledb/array.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tiledb/array.py b/tiledb/array.py
--- a/tiledb/array.py
+++ b/tiledb/array.py
@@ -1,17 +1,29 @@
from .libtiledb import DenseArrayImpl, SparseArrayImpl
+# Extensible (pure Python) array class definitions inheriting from the
+# Cython implemention. The cloudarray mix-in adds optional functionality
+# for registering arrays and executing functions on the
+
+# NOTE: the mixin import must be inside the __new__ initializer because it
+# needs to be deferred. tiledb.cloud is not yet known to the importer
+# when this code is imported.
+# TODO: might be possible to work-around/simplify by using
+# import meta-hooks instead.
+
class DenseArray(DenseArrayImpl):
_mixin_init = False
def __new__(cls, *args, **kwargs):
if not cls._mixin_init:
+ # must set before importing, because import is not thread-safe
+ # https://github.com/TileDB-Inc/TileDB-Py/issues/244
+ cls._mixin_init = True
try:
from tiledb.cloud import cloudarray
DenseArray.__bases__ = DenseArray.__bases__ + (cloudarray.CloudArray,)
+ DenseArray.__doc__ = DenseArrayImpl.__doc__
except ImportError:
pass
- finally:
- cls._mixin_init = True
obj = super(DenseArray, cls).__new__(cls, *args, **kwargs)
return obj
@@ -21,13 +33,13 @@
def __new__(cls, *args, **kwargs):
if not cls._mixin_init:
+ cls._mixin_init = True
try:
from tiledb.cloud import cloudarray
SparseArray.__bases__ = SparseArray.__bases__ + (cloudarray.CloudArray,)
+ SparseArray.__doc__ = DenseArrayImpl.__doc__
except ImportError:
pass
- finally:
- cls._mixin_init = True
obj = super(SparseArray, cls).__new__(cls, *args, **kwargs)
return obj
| {"golden_diff": "diff --git a/tiledb/array.py b/tiledb/array.py\n--- a/tiledb/array.py\n+++ b/tiledb/array.py\n@@ -1,17 +1,29 @@\n from .libtiledb import DenseArrayImpl, SparseArrayImpl\n \n+# Extensible (pure Python) array class definitions inheriting from the\n+# Cython implemention. The cloudarray mix-in adds optional functionality\n+# for registering arrays and executing functions on the\n+\n+# NOTE: the mixin import must be inside the __new__ initializer because it\n+# needs to be deferred. tiledb.cloud is not yet known to the importer\n+# when this code is imported.\n+# TODO: might be possible to work-around/simplify by using\n+# import meta-hooks instead.\n+\n class DenseArray(DenseArrayImpl):\n _mixin_init = False\n \n def __new__(cls, *args, **kwargs):\n if not cls._mixin_init:\n+ # must set before importing, because import is not thread-safe\n+ # https://github.com/TileDB-Inc/TileDB-Py/issues/244\n+ cls._mixin_init = True\n try:\n from tiledb.cloud import cloudarray\n DenseArray.__bases__ = DenseArray.__bases__ + (cloudarray.CloudArray,)\n+ DenseArray.__doc__ = DenseArrayImpl.__doc__\n except ImportError:\n pass\n- finally:\n- cls._mixin_init = True\n \n obj = super(DenseArray, cls).__new__(cls, *args, **kwargs)\n return obj\n@@ -21,13 +33,13 @@\n \n def __new__(cls, *args, **kwargs):\n if not cls._mixin_init:\n+ cls._mixin_init = True\n try:\n from tiledb.cloud import cloudarray\n SparseArray.__bases__ = SparseArray.__bases__ + (cloudarray.CloudArray,)\n+ SparseArray.__doc__ = DenseArrayImpl.__doc__\n except ImportError:\n pass\n- finally:\n- cls._mixin_init = True\n \n obj = super(SparseArray, cls).__new__(cls, *args, **kwargs)\n return obj\n", "issue": "likely race condition in DenseArray.__new__\n\r\nTest program below exposes (what I believe is a) race in class handling in DenseArray. The test program is completely artificial - it just spins up 5 threads, each of which open an S3 object and ask its shape. Without a sleep to sequence their execution, it fails inside the DenseArray.__new__ method.\r\n\r\nRun with the sleep statement in worker(), I see:\r\n```\r\npython cctest.py\r\nstarting... 0\r\nstarting... 1\r\nworking... 0\r\nending... 0 (1305994, 972)\r\nstarting... 2\r\nworking... 1\r\nending... 1 (1305994, 972)\r\nstarting... 3\r\nworking... 2\r\nending... 2 (1305994, 972)\r\nstarting... 4\r\nworking... 3\r\nending... 3 (1305994, 972)\r\nworking... 4\r\nending... 4 (1305994, 972)\r\nall done [(1305994, 972), (1305994, 972), (1305994, 972), (1305994, 972), (1305994, 972)]\r\n```\r\n\r\nRun with the sleep in worker removed/commented out:\r\n```\r\n$ python cctest.py\r\nstarting... 0\r\nstarting... 1\r\nstarting... 2\r\nworking... 0\r\nending... 0 (1305994, 972)\r\nstarting... 3\r\nworking... 2\r\nending... 2 (1305994, 972)\r\nstarting... 4\r\nworking... 3\r\nending... 3 (1305994, 972)\r\nworking... 4\r\nending... 
4 (1305994, 972)\r\nTraceback (most recent call last):\r\n File \"cctest.py\", line 33, in <module>\r\n main()\r\n File \"cctest.py\", line 27, in main\r\n res = [f.result() for f in concurrent.futures.as_completed(futures)]\r\n File \"cctest.py\", line 27, in <listcomp>\r\n res = [f.result() for f in concurrent.futures.as_completed(futures)]\r\n File \"/usr/lib64/python3.7/concurrent/futures/_base.py\", line 428, in result\r\n return self.__get_result()\r\n File \"/usr/lib64/python3.7/concurrent/futures/_base.py\", line 384, in __get_result\r\n raise self._exception\r\n File \"/usr/lib64/python3.7/concurrent/futures/thread.py\", line 57, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File \"cctest.py\", line 11, in worker\r\n with tiledb.DenseArray(uri) as A:\r\n File \"/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/array.py\", line 10, in __new__\r\n DenseArray.__bases__ = DenseArray.__bases__ + (cloudarray.CloudArray,)\r\nTypeError: duplicate base class CloudArray\r\n```\r\n\r\nTest case:\r\n```\r\nimport tiledb\r\nimport time\r\nimport numpy as np\r\nimport concurrent.futures\r\n\r\n\r\ndef worker(n, uri):\r\n print('starting...', n)\r\n # time.sleep(n)\r\n\r\n with tiledb.DenseArray(uri) as A:\r\n print('working...', n)\r\n res = A.shape\r\n\r\n print('ending...', n, res)\r\n return res\r\n\r\n\r\ndef main():\r\n executor_cls = concurrent.futures.ThreadPoolExecutor\r\n\r\n uri = \"s3://tiledb-eval/10x_mouse_13MM_processed-Xr3zstd.cxg/X\"\r\n\r\n with executor_cls(max_workers=2) as executor:\r\n futures = [executor.submit(worker, n, uri) for n in range(0, 5)]\r\n res = [f.result() for f in concurrent.futures.as_completed(futures)]\r\n\r\n print('all done', res)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n```\n", "before_files": [{"content": "from .libtiledb import DenseArrayImpl, SparseArrayImpl\n\nclass DenseArray(DenseArrayImpl):\n _mixin_init = False\n\n def __new__(cls, *args, **kwargs):\n if not cls._mixin_init:\n try:\n from tiledb.cloud import cloudarray\n DenseArray.__bases__ = DenseArray.__bases__ + (cloudarray.CloudArray,)\n except ImportError:\n pass\n finally:\n cls._mixin_init = True\n\n obj = super(DenseArray, cls).__new__(cls, *args, **kwargs)\n return obj\n\nclass SparseArray(SparseArrayImpl):\n _mixin_init = False\n\n def __new__(cls, *args, **kwargs):\n if not cls._mixin_init:\n try:\n from tiledb.cloud import cloudarray\n SparseArray.__bases__ = SparseArray.__bases__ + (cloudarray.CloudArray,)\n except ImportError:\n pass\n finally:\n cls._mixin_init = True\n\n obj = super(SparseArray, cls).__new__(cls, *args, **kwargs)\n return obj\n", "path": "tiledb/array.py"}]} | 1,772 | 478 |
gh_patches_debug_13110 | rasdani/github-patches | git_diff | Mailu__Mailu-1863 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Radicale password check fails (length limited?)
So far, the WebDAV Radicale endpoint is protected with basic auth (apparently htaccess or similar).
If the user password is longer than 8 characters, the connection fails.
We should remove this limitation so users can use strong passwords.
</issue>
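Whatever the root cause here turns out to be, one detail worth checking in any basic-auth handler: per RFC 7617 the decoded credentials have the form `user:password`, and only the first colon is a separator, so a naive `split(b":")` mangles any password that itself contains a colon. A small sketch of defensive decoding (a hypothetical helper, not Mailu's actual code):

```python
import base64

def parse_basic_auth(header_value: str) -> tuple[str, str]:
    """Decode an RFC 7617 Basic auth value; only the first ':' separates fields."""
    encoded = header_value.removeprefix("Basic ")
    decoded = base64.b64decode(encoded)
    user, _, password = decoded.partition(b":")   # never split inside the password
    return user.decode("utf8"), password.decode("utf8")

creds = base64.b64encode(b"[email protected]:pa:ss:word!").decode()
print(parse_basic_auth("Basic " + creds))
# ('[email protected]', 'pa:ss:word!')
```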
<code>
[start of core/admin/mailu/internal/views/auth.py]
1 from mailu import models, utils
2 from mailu.internal import internal, nginx
3 from flask import current_app as app
4
5 import flask
6 import flask_login
7 import base64
8 import ipaddress
9
10
11 @internal.route("/auth/email")
12 def nginx_authentication():
13 """ Main authentication endpoint for Nginx email server
14 """
15 limiter = utils.limiter.get_limiter(app.config["AUTH_RATELIMIT"], "auth-ip")
16 client_ip = flask.request.headers["Client-Ip"]
17 if not limiter.test(client_ip):
18 response = flask.Response()
19 response.headers['Auth-Status'] = 'Authentication rate limit from one source exceeded'
20 response.headers['Auth-Error-Code'] = '451 4.3.2'
21 if int(flask.request.headers['Auth-Login-Attempt']) < 10:
22 response.headers['Auth-Wait'] = '3'
23 return response
24 headers = nginx.handle_authentication(flask.request.headers)
25 response = flask.Response()
26 for key, value in headers.items():
27 response.headers[key] = str(value)
28 if ("Auth-Status" not in headers) or (headers["Auth-Status"] != "OK"):
29 limit_subnet = str(app.config["AUTH_RATELIMIT_SUBNET"]) != 'False'
30 subnet = ipaddress.ip_network(app.config["SUBNET"])
31 if limit_subnet or ipaddress.ip_address(client_ip) not in subnet:
32 limiter.hit(flask.request.headers["Client-Ip"])
33 return response
34
35
36 @internal.route("/auth/admin")
37 def admin_authentication():
38 """ Fails if the user is not an authenticated admin.
39 """
40 if (not flask_login.current_user.is_anonymous
41 and flask_login.current_user.global_admin
42 and flask_login.current_user.enabled):
43 return ""
44 return flask.abort(403)
45
46
47 @internal.route("/auth/basic")
48 def basic_authentication():
49 """ Tries to authenticate using the Authorization header.
50 """
51 authorization = flask.request.headers.get("Authorization")
52 if authorization and authorization.startswith("Basic "):
53 encoded = authorization.replace("Basic ", "")
54 user_email, password = base64.b64decode(encoded).split(b":")
55 user = models.User.query.get(user_email.decode("utf8"))
56 if user and user.enabled and user.check_password(password.decode("utf8")):
57 response = flask.Response()
58 response.headers["X-User"] = user.email
59 return response
60 response = flask.Response(status=401)
61 response.headers["WWW-Authenticate"] = 'Basic realm="Login Required"'
62 return response
63
[end of core/admin/mailu/internal/views/auth.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py
--- a/core/admin/mailu/internal/views/auth.py
+++ b/core/admin/mailu/internal/views/auth.py
@@ -51,7 +51,7 @@
authorization = flask.request.headers.get("Authorization")
if authorization and authorization.startswith("Basic "):
encoded = authorization.replace("Basic ", "")
- user_email, password = base64.b64decode(encoded).split(b":")
+ user_email, password = base64.b64decode(encoded).split(b":", 1)
user = models.User.query.get(user_email.decode("utf8"))
if user and user.enabled and user.check_password(password.decode("utf8")):
response = flask.Response()
| {"golden_diff": "diff --git a/core/admin/mailu/internal/views/auth.py b/core/admin/mailu/internal/views/auth.py\n--- a/core/admin/mailu/internal/views/auth.py\n+++ b/core/admin/mailu/internal/views/auth.py\n@@ -51,7 +51,7 @@\n authorization = flask.request.headers.get(\"Authorization\")\n if authorization and authorization.startswith(\"Basic \"):\n encoded = authorization.replace(\"Basic \", \"\")\n- user_email, password = base64.b64decode(encoded).split(b\":\")\n+ user_email, password = base64.b64decode(encoded).split(b\":\", 1)\n user = models.User.query.get(user_email.decode(\"utf8\"))\n if user and user.enabled and user.check_password(password.decode(\"utf8\")):\n response = flask.Response()\n", "issue": "Radicale password check fails (length limited?)\nSo far, the webdav radicale is protected with basic auth (apparently htaccess or similar).\r\nIf user password > 8 then it fails to connect. \r\nwe should remove this limitation to let use stong passwords\n", "before_files": [{"content": "from mailu import models, utils\nfrom mailu.internal import internal, nginx\nfrom flask import current_app as app\n\nimport flask\nimport flask_login\nimport base64\nimport ipaddress\n\n\[email protected](\"/auth/email\")\ndef nginx_authentication():\n \"\"\" Main authentication endpoint for Nginx email server\n \"\"\"\n limiter = utils.limiter.get_limiter(app.config[\"AUTH_RATELIMIT\"], \"auth-ip\")\n client_ip = flask.request.headers[\"Client-Ip\"]\n if not limiter.test(client_ip):\n response = flask.Response()\n response.headers['Auth-Status'] = 'Authentication rate limit from one source exceeded'\n response.headers['Auth-Error-Code'] = '451 4.3.2'\n if int(flask.request.headers['Auth-Login-Attempt']) < 10:\n response.headers['Auth-Wait'] = '3'\n return response\n headers = nginx.handle_authentication(flask.request.headers)\n response = flask.Response()\n for key, value in headers.items():\n response.headers[key] = str(value)\n if (\"Auth-Status\" not in headers) or (headers[\"Auth-Status\"] != \"OK\"):\n limit_subnet = str(app.config[\"AUTH_RATELIMIT_SUBNET\"]) != 'False'\n subnet = ipaddress.ip_network(app.config[\"SUBNET\"])\n if limit_subnet or ipaddress.ip_address(client_ip) not in subnet:\n limiter.hit(flask.request.headers[\"Client-Ip\"])\n return response\n\n\[email protected](\"/auth/admin\")\ndef admin_authentication():\n \"\"\" Fails if the user is not an authenticated admin.\n \"\"\"\n if (not flask_login.current_user.is_anonymous\n and flask_login.current_user.global_admin\n and flask_login.current_user.enabled):\n return \"\"\n return flask.abort(403)\n\n\[email protected](\"/auth/basic\")\ndef basic_authentication():\n \"\"\" Tries to authenticate using the Authorization header.\n \"\"\"\n authorization = flask.request.headers.get(\"Authorization\")\n if authorization and authorization.startswith(\"Basic \"):\n encoded = authorization.replace(\"Basic \", \"\")\n user_email, password = base64.b64decode(encoded).split(b\":\")\n user = models.User.query.get(user_email.decode(\"utf8\"))\n if user and user.enabled and user.check_password(password.decode(\"utf8\")):\n response = flask.Response()\n response.headers[\"X-User\"] = user.email\n return response\n response = flask.Response(status=401)\n response.headers[\"WWW-Authenticate\"] = 'Basic realm=\"Login Required\"'\n return response\n", "path": "core/admin/mailu/internal/views/auth.py"}]} | 1,258 | 166 |
gh_patches_debug_1340 | rasdani/github-patches | git_diff | nilearn__nilearn-3337 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spelling Error
<!--Describe your proposed enhancement in detail.-->
I think the authors meant to describe ADHD but have written ADHD as AHDH. It is just a simple spelling or typographic error.
<!--List any pages that would be impacted by the enhancement.-->
### Affected pages
1. https://nilearn.github.io/dev/auto_examples/04_glm_first_level/plot_adhd_dmn.html#sphx-glr-auto-examples-04-glm-first-level-plot-adhd-dmn-py
2. https://nilearn.github.io/dev/glm/first_level_model.html#fitting-a-first-level-model
</issue>
<code>
[start of examples/04_glm_first_level/plot_adhd_dmn.py]
1 """Default Mode Network extraction of AHDH dataset
2 ===============================================
3
4 This example shows a full step-by-step workflow of fitting a GLM to data
5 extracted from a seed on the Posterior Cingulate Cortex and saving the results.
6
7 More specifically:
8
9 1. A sequence of fMRI volumes are loaded.
10 2. A design matrix with the Posterior Cingulate Cortex seed is defined.
11 3. A GLM is applied to the dataset (effect/covariance, then contrast estimation).
12 4. The Default Mode Network is displayed.
13
14 .. include:: ../../../examples/masker_note.rst
15
16 """
17 import numpy as np
18
19 from nilearn import datasets, plotting
20 from nilearn.maskers import NiftiSpheresMasker
21
22 from nilearn.glm.first_level import FirstLevelModel
23 from nilearn.glm.first_level import make_first_level_design_matrix
24
25 #########################################################################
26 # Prepare data and analysis parameters
27 # -------------------------------------
28 # Prepare the data.
29 adhd_dataset = datasets.fetch_adhd(n_subjects=1)
30
31 # Prepare timing
32 t_r = 2.
33 slice_time_ref = 0.
34 n_scans = 176
35
36 # Prepare seed
37 pcc_coords = (0, -53, 26)
38
39 #########################################################################
40 # Estimate contrasts
41 # ------------------
42 # Specify the contrasts.
43 seed_masker = NiftiSpheresMasker([pcc_coords], radius=10, detrend=True,
44 standardize=True, low_pass=0.1,
45 high_pass=0.01, t_r=2.,
46 memory='nilearn_cache',
47 memory_level=1, verbose=0)
48 seed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])
49 frametimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)
50 design_matrix = make_first_level_design_matrix(frametimes, hrf_model='spm',
51 add_regs=seed_time_series,
52 add_reg_names=["pcc_seed"])
53 dmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))
54 contrasts = {'seed_based_glm': dmn_contrast}
55
56 #########################################################################
57 # Perform first level analysis
58 # ----------------------------
59 # Setup and fit GLM.
60 first_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)
61 first_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],
62 design_matrices=design_matrix)
63
64 #########################################################################
65 # Estimate the contrast.
66 print('Contrast seed_based_glm computed.')
67 z_map = first_level_model.compute_contrast(contrasts['seed_based_glm'],
68 output_type='z_score')
69
70 # Saving snapshots of the contrasts
71 filename = 'dmn_z_map.png'
72 display = plotting.plot_stat_map(z_map, threshold=3.0, title='Seed based GLM',
73 cut_coords=pcc_coords)
74 display.add_markers(marker_coords=[pcc_coords], marker_color='g',
75 marker_size=300)
76 display.savefig(filename)
77 print("Save z-map in '{0}'.".format(filename))
78
79 ###########################################################################
80 # Generating a report
81 # -------------------
82 # It can be useful to quickly generate a
83 # portable, ready-to-view report with most of the pertinent information.
84 # This is easy to do if you have a fitted model and the list of contrasts,
85 # which we do here.
86
87 from nilearn.reporting import make_glm_report
88
89 report = make_glm_report(first_level_model,
90 contrasts=contrasts,
91 title='ADHD DMN Report',
92 cluster_threshold=15,
93 min_distance=8.,
94 plot_type='glass',
95 )
96
97 #########################################################################
98 # We have several ways to access the report:
99
100 # report # This report can be viewed in a notebook
101 # report.save_as_html('report.html')
102 # report.open_in_browser()
103
[end of examples/04_glm_first_level/plot_adhd_dmn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/04_glm_first_level/plot_adhd_dmn.py b/examples/04_glm_first_level/plot_adhd_dmn.py
--- a/examples/04_glm_first_level/plot_adhd_dmn.py
+++ b/examples/04_glm_first_level/plot_adhd_dmn.py
@@ -1,4 +1,4 @@
-"""Default Mode Network extraction of AHDH dataset
+"""Default Mode Network extraction of ADHD dataset
===============================================
This example shows a full step-by-step workflow of fitting a GLM to data
| {"golden_diff": "diff --git a/examples/04_glm_first_level/plot_adhd_dmn.py b/examples/04_glm_first_level/plot_adhd_dmn.py\n--- a/examples/04_glm_first_level/plot_adhd_dmn.py\n+++ b/examples/04_glm_first_level/plot_adhd_dmn.py\n@@ -1,4 +1,4 @@\n-\"\"\"Default Mode Network extraction of AHDH dataset\n+\"\"\"Default Mode Network extraction of ADHD dataset\n ===============================================\n \n This example shows a full step-by-step workflow of fitting a GLM to data\n", "issue": "Spelling Error\n<!--Describe your proposed enhancement in detail.-->\r\nI think the authors meant to describe ADHD but have written ADHD as AHDH. It is just a simple spelling or typographic error.\r\n<!--List any pages that would be impacted by the enhancement.-->\r\n### Affected pages\r\n1. https://nilearn.github.io/dev/auto_examples/04_glm_first_level/plot_adhd_dmn.html#sphx-glr-auto-examples-04-glm-first-level-plot-adhd-dmn-py\r\n 2. https://nilearn.github.io/dev/glm/first_level_model.html#fitting-a-first-level-model\n", "before_files": [{"content": "\"\"\"Default Mode Network extraction of AHDH dataset\n===============================================\n\nThis example shows a full step-by-step workflow of fitting a GLM to data\nextracted from a seed on the Posterior Cingulate Cortex and saving the results.\n\nMore specifically:\n\n1. A sequence of fMRI volumes are loaded.\n2. A design matrix with the Posterior Cingulate Cortex seed is defined.\n3. A GLM is applied to the dataset (effect/covariance, then contrast estimation).\n4. The Default Mode Network is displayed.\n\n.. include:: ../../../examples/masker_note.rst\n\n\"\"\"\nimport numpy as np\n\nfrom nilearn import datasets, plotting\nfrom nilearn.maskers import NiftiSpheresMasker\n\nfrom nilearn.glm.first_level import FirstLevelModel\nfrom nilearn.glm.first_level import make_first_level_design_matrix\n\n#########################################################################\n# Prepare data and analysis parameters\n# -------------------------------------\n# Prepare the data.\nadhd_dataset = datasets.fetch_adhd(n_subjects=1)\n\n# Prepare timing\nt_r = 2.\nslice_time_ref = 0.\nn_scans = 176\n\n# Prepare seed\npcc_coords = (0, -53, 26)\n\n#########################################################################\n# Estimate contrasts\n# ------------------\n# Specify the contrasts.\nseed_masker = NiftiSpheresMasker([pcc_coords], radius=10, detrend=True,\n standardize=True, low_pass=0.1,\n high_pass=0.01, t_r=2.,\n memory='nilearn_cache',\n memory_level=1, verbose=0)\nseed_time_series = seed_masker.fit_transform(adhd_dataset.func[0])\nframetimes = np.linspace(0, (n_scans - 1) * t_r, n_scans)\ndesign_matrix = make_first_level_design_matrix(frametimes, hrf_model='spm',\n add_regs=seed_time_series,\n add_reg_names=[\"pcc_seed\"])\ndmn_contrast = np.array([1] + [0] * (design_matrix.shape[1] - 1))\ncontrasts = {'seed_based_glm': dmn_contrast}\n\n#########################################################################\n# Perform first level analysis\n# ----------------------------\n# Setup and fit GLM.\nfirst_level_model = FirstLevelModel(t_r=t_r, slice_time_ref=slice_time_ref)\nfirst_level_model = first_level_model.fit(run_imgs=adhd_dataset.func[0],\n design_matrices=design_matrix)\n\n#########################################################################\n# Estimate the contrast.\nprint('Contrast seed_based_glm computed.')\nz_map = first_level_model.compute_contrast(contrasts['seed_based_glm'],\n output_type='z_score')\n\n# Saving snapshots of the 
contrasts\nfilename = 'dmn_z_map.png'\ndisplay = plotting.plot_stat_map(z_map, threshold=3.0, title='Seed based GLM',\n cut_coords=pcc_coords)\ndisplay.add_markers(marker_coords=[pcc_coords], marker_color='g',\n marker_size=300)\ndisplay.savefig(filename)\nprint(\"Save z-map in '{0}'.\".format(filename))\n\n###########################################################################\n# Generating a report\n# -------------------\n# It can be useful to quickly generate a\n# portable, ready-to-view report with most of the pertinent information.\n# This is easy to do if you have a fitted model and the list of contrasts,\n# which we do here.\n\nfrom nilearn.reporting import make_glm_report\n\nreport = make_glm_report(first_level_model,\n contrasts=contrasts,\n title='ADHD DMN Report',\n cluster_threshold=15,\n min_distance=8.,\n plot_type='glass',\n )\n\n#########################################################################\n# We have several ways to access the report:\n\n# report # This report can be viewed in a notebook\n# report.save_as_html('report.html')\n# report.open_in_browser()\n", "path": "examples/04_glm_first_level/plot_adhd_dmn.py"}]} | 1,711 | 125 |
gh_patches_debug_850 | rasdani/github-patches | git_diff | dotkom__onlineweb4-1359 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Option to post video in article
Make it possible to post a video in an article from the dashboard.
</issue>
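Assuming the `Article` model has (or gains) a `video` field, exposing it in the dashboard is mainly a matter of listing it in the form's `Meta.fields`. A sketch of what that could look like in the form shown below (the field name is illustrative):

```python
class ArticleForm(forms.ModelForm):
    class Meta(object):
        model = Article
        fields = [
            'heading',
            'ingress_short',
            'ingress',
            'content',
            'image',
            'video',  # assumed model field for the embedded video
            'published_date',
            'authors',
            'tags',
            'featured',
        ]
```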
<code>
[start of apps/article/dashboard/forms.py]
1 # -*- encoding: utf-8 -*-
2 from django import forms
3
4 from apps.article.models import Article
5 from apps.dashboard.widgets import DatetimePickerInput, multiple_widget_generator
6 from apps.gallery.widgets import SingleImageInput
7
8 from taggit.forms import TagWidget
9
10
11 class ArticleForm(forms.ModelForm):
12
13 class Meta(object):
14 """
15 Add fields that should have DTP activated in the datetimepicker_fields list
16 """
17
18 model = Article
19 fields = [
20 'heading',
21 'ingress_short',
22 'ingress',
23 'content',
24 'image',
25 'published_date',
26 'authors',
27 'tags',
28 'featured'
29 ]
30
31 # Fields should be a mapping between field name and an attribute dictionary
32 img_fields = [('image', {'id': 'responsive-image-id'})]
33 dtp_fields = [('published_date', {})]
34 widgetlist = [
35 (DatetimePickerInput, dtp_fields),
36 (SingleImageInput, img_fields)
37 ]
38
39 # Multiple widget generator merges results from regular widget_generator into a single widget dict
40 widgets = multiple_widget_generator(widgetlist)
41 widgets.update({'tags': TagWidget(attrs={'placeholder': 'Eksempel: åre, online, kjelleren'})})
42 labels = {
43 'tags': u'Tags'
44 }
45
[end of apps/article/dashboard/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/article/dashboard/forms.py b/apps/article/dashboard/forms.py
--- a/apps/article/dashboard/forms.py
+++ b/apps/article/dashboard/forms.py
@@ -22,6 +22,7 @@
'ingress',
'content',
'image',
+ 'video',
'published_date',
'authors',
'tags',
| {"golden_diff": "diff --git a/apps/article/dashboard/forms.py b/apps/article/dashboard/forms.py\n--- a/apps/article/dashboard/forms.py\n+++ b/apps/article/dashboard/forms.py\n@@ -22,6 +22,7 @@\n 'ingress',\n 'content',\n 'image',\n+ 'video',\n 'published_date',\n 'authors',\n 'tags',\n", "issue": "Option to post video in article\nMake it possible to post video in article from dashboard.\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\nfrom django import forms\n\nfrom apps.article.models import Article\nfrom apps.dashboard.widgets import DatetimePickerInput, multiple_widget_generator\nfrom apps.gallery.widgets import SingleImageInput\n\nfrom taggit.forms import TagWidget\n\n\nclass ArticleForm(forms.ModelForm):\n\n class Meta(object):\n \"\"\"\n Add fields that should have DTP activated in the datetimepicker_fields list\n \"\"\"\n\n model = Article\n fields = [\n 'heading',\n 'ingress_short',\n 'ingress',\n 'content',\n 'image',\n 'published_date',\n 'authors',\n 'tags',\n 'featured'\n ]\n\n # Fields should be a mapping between field name and an attribute dictionary\n img_fields = [('image', {'id': 'responsive-image-id'})]\n dtp_fields = [('published_date', {})]\n widgetlist = [\n (DatetimePickerInput, dtp_fields),\n (SingleImageInput, img_fields)\n ]\n\n # Multiple widget generator merges results from regular widget_generator into a single widget dict\n widgets = multiple_widget_generator(widgetlist)\n widgets.update({'tags': TagWidget(attrs={'placeholder': 'Eksempel: \u00e5re, online, kjelleren'})})\n labels = {\n 'tags': u'Tags'\n }\n", "path": "apps/article/dashboard/forms.py"}]} | 912 | 76 |
gh_patches_debug_26748 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-966 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Example of groupby_agg shows wrong output
# Brief Description of Fix
<!-- Please describe the fix in terms of a "before" and "after". In other words, what's not so good about the current docs
page, and what you would like to see it become.
Example starter wording is provided. -->
Currently, the docs for `groupby_agg` use `new_column_name='count'`, but the result shown in the docs has the column name 'size'.
<img width="554" alt="image" src="https://user-images.githubusercontent.com/30731072/146632174-9b4a1022-d205-4ead-855e-76a3dccb7b7a.png">
The correct column name should be 'count'.
<img width="330" alt="image" src="https://user-images.githubusercontent.com/30731072/146632197-7607612a-8388-4383-846b-07ac3d7b522d.png">
```
group var1 count
0 1 1 4
1 1 1 4
2 1 1 4
3 1 1 4
4 1 2 1
5 2 1 1
6 2 2 3
7 2 2 3
8 2 2 3
9 2 3 1
```
This seems like a simple enough fix; I'm happy to open a PR if you'd like.
# Relevant Context
<!-- Please put here, in bullet points, links to the relevant docs page. A few starting template points are available
to get you started. -->
- [Link to documentation page](https://pyjanitor-devs.github.io/pyjanitor/api/functions/#janitor.functions.groupby_agg)
- [Link to exact file to be edited](https://github.com/pyjanitor-devs/pyjanitor/blob/dev/janitor/functions/groupby_agg.py)
</issue>
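Since `groupby_agg` is essentially sugar for `assign` + `groupby` + `transform` (see the source below), the output column carries whatever name the caller passes as `new_column_name`. A plain-pandas reproduction of the corrected table:

```python
import pandas as pd

df = pd.DataFrame({
    "group": [1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
    "var1":  [1, 1, 1, 1, 2, 1, 2, 2, 2, 3],
})

# Equivalent of df.groupby_agg(by=["group", "var1"], agg="size",
#                              agg_column_name="var1", new_column_name="count")
out = df.assign(count=df.groupby(["group", "var1"])["var1"].transform("size"))
print(out)   # the new column is named 'count', matching new_column_name
```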
<code>
[start of janitor/functions/groupby_agg.py]
1 from typing import Callable, List, Union
2 import pandas_flavor as pf
3 import pandas as pd
4
5 from janitor.utils import deprecated_alias
6
7
8 @pf.register_dataframe_method
9 @deprecated_alias(new_column="new_column_name", agg_column="agg_column_name")
10 def groupby_agg(
11 df: pd.DataFrame,
12 by: Union[List, str],
13 new_column_name: str,
14 agg_column_name: str,
15 agg: Union[Callable, str],
16 dropna: bool = True,
17 ) -> pd.DataFrame:
18 """
19 Shortcut for assigning a groupby-transform to a new column.
20
21 This method does not mutate the original DataFrame.
22
23 Without this function, we would have to write a verbose line:
24
25 ```python
26 df = df.assign(...=df.groupby(...)[...].transform(...))
27 ```
28
29 Now, this function can be method-chained:
30
31 ```python
32 import pandas as pd
33 import janitor
34 df = pd.DataFrame(...).groupby_agg(by='group',
35 agg='mean',
36 agg_column_name="col1"
37 new_column_name='col1_mean_by_group',
38 dropna = True/False)
39 ```
40
41 Functional usage syntax:
42
43 ```python
44
45 import pandas as pd
46 import janitor as jn
47
48 jn.groupby_agg(
49 df,
50 by= column name/list of column names,
51 agg=aggregation function,
52 agg_column_name = col,
53 new_column_name= new column name,
54 dropna = True/False)
55 ```
56
57 Method chaining usage syntax:
58
59 ```python
60
61 df.groupby_agg(
62 by= column name/list of column names,
63 agg=aggregation function,
64 agg_column_name = col,
65 new_column_name= new column name,
66 dropna = True/False)
67 ```
68
69
70 :param df: A pandas DataFrame.
71 :param by: Column(s) to groupby on, either a `str` or
72 a `list` of `str`
73 :param new_column_name: Name of the aggregation output column.
74 :param agg_column_name: Name of the column to aggregate over.
75 :param agg: How to aggregate.
76 :param dropna: Whether or not to include null values,
77 if present in the `by` column(s). Default is True.
78 :returns: A pandas DataFrame.
79 """
80
81 return df.assign(
82 **{
83 new_column_name: df.groupby(by, dropna=dropna)[
84 agg_column_name
85 ].transform(agg)
86 }
87 )
88
[end of janitor/functions/groupby_agg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/janitor/functions/groupby_agg.py b/janitor/functions/groupby_agg.py
--- a/janitor/functions/groupby_agg.py
+++ b/janitor/functions/groupby_agg.py
@@ -35,7 +35,7 @@
agg='mean',
agg_column_name="col1"
new_column_name='col1_mean_by_group',
- dropna = True/False)
+ dropna=True/False)
```
Functional usage syntax:
@@ -56,6 +56,41 @@
Method chaining usage syntax:
+ ```python
+ df.groupby_agg(
+ by=['group', 'var1'],
+ agg='size',
+ agg_column_name='var1',
+ new_column_name='count',
+ )
+ ```
+
+ group var1 count
+ 0 1 1 4
+ 1 1 1 4
+ 2 1 1 4
+ 3 1 1 4
+ 4 1 2 1
+ 5 2 1 1
+ 6 2 2 3
+ 7 2 2 3
+ 8 2 2 3
+ 9 2 3 1
+
+ If the data has null values,
+ you can include the null values by passing `False` to `dropna`;
+ this feature was introduced in Pandas 1.1:
+
+ name type num nulls
+ 0 black chair 4 1.0
+ 1 black chair 5 1.0
+ 2 black sofa 12 NaN
+ 3 red sofa 4 NaN
+ 4 red plate 3 3.0
+
+ Let's get the count, including the null values,
+ grouping on `nulls` column:
+
```python
df.groupby_agg(
| {"golden_diff": "diff --git a/janitor/functions/groupby_agg.py b/janitor/functions/groupby_agg.py\n--- a/janitor/functions/groupby_agg.py\n+++ b/janitor/functions/groupby_agg.py\n@@ -35,7 +35,7 @@\n agg='mean',\n agg_column_name=\"col1\"\n new_column_name='col1_mean_by_group',\n- dropna = True/False)\n+ dropna=True/False)\n ```\n \n Functional usage syntax:\n@@ -56,6 +56,41 @@\n \n Method chaining usage syntax:\n \n+ ```python\n+ df.groupby_agg(\n+ by=['group', 'var1'],\n+ agg='size',\n+ agg_column_name='var1',\n+ new_column_name='count',\n+ )\n+ ```\n+\n+ group var1 count\n+ 0 1 1 4\n+ 1 1 1 4\n+ 2 1 1 4\n+ 3 1 1 4\n+ 4 1 2 1\n+ 5 2 1 1\n+ 6 2 2 3\n+ 7 2 2 3\n+ 8 2 2 3\n+ 9 2 3 1\n+\n+ If the data has null values,\n+ you can include the null values by passing `False` to `dropna`;\n+ this feature was introduced in Pandas 1.1:\n+\n+ name type num nulls\n+ 0 black chair 4 1.0\n+ 1 black chair 5 1.0\n+ 2 black sofa 12 NaN\n+ 3 red sofa 4 NaN\n+ 4 red plate 3 3.0\n+\n+ Let's get the count, including the null values,\n+ grouping on `nulls` column:\n+\n ```python\n \n df.groupby_agg(\n", "issue": "Example of groupby_agg shows wrong output\n# Brief Description of Fix\r\n\r\n<!-- Please describe the fix in terms of a \"before\" and \"after\". In other words, what's not so good about the current docs\r\npage, and what you would like to see it become.\r\n\r\nExample starter wording is provided. -->\r\n\r\nCurrently, the docs for `groupby_agg` uses `new_column_name='count'` but the result shown in the docs has column name 'size'.\r\n\r\n<img width=\"554\" alt=\"image\" src=\"https://user-images.githubusercontent.com/30731072/146632174-9b4a1022-d205-4ead-855e-76a3dccb7b7a.png\">\r\n\r\nThe correct column name should be 'count'.\r\n\r\n<img width=\"330\" alt=\"image\" src=\"https://user-images.githubusercontent.com/30731072/146632197-7607612a-8388-4383-846b-07ac3d7b522d.png\">\r\n\r\n```\r\n group var1 count\r\n0 1 1 4\r\n1 1 1 4\r\n2 1 1 4\r\n3 1 1 4\r\n4 1 2 1\r\n5 2 1 1\r\n6 2 2 3\r\n7 2 2 3\r\n8 2 2 3\r\n9 2 3 1\r\n```\r\n\r\nSeems like a simple enough fix, I'm happy to open a PR if you'ld like\r\n\r\n# Relevant Context\r\n\r\n<!-- Please put here, in bullet points, links to the relevant docs page. A few starting template points are available\r\nto get you started. 
-->\r\n\r\n- [Link to documentation page](https://pyjanitor-devs.github.io/pyjanitor/api/functions/#janitor.functions.groupby_agg)\r\n- [Link to exact file to be edited](https://github.com/pyjanitor-devs/pyjanitor/blob/dev/janitor/functions/groupby_agg.py)\r\n\n", "before_files": [{"content": "from typing import Callable, List, Union\nimport pandas_flavor as pf\nimport pandas as pd\n\nfrom janitor.utils import deprecated_alias\n\n\[email protected]_dataframe_method\n@deprecated_alias(new_column=\"new_column_name\", agg_column=\"agg_column_name\")\ndef groupby_agg(\n df: pd.DataFrame,\n by: Union[List, str],\n new_column_name: str,\n agg_column_name: str,\n agg: Union[Callable, str],\n dropna: bool = True,\n) -> pd.DataFrame:\n \"\"\"\n Shortcut for assigning a groupby-transform to a new column.\n\n This method does not mutate the original DataFrame.\n\n Without this function, we would have to write a verbose line:\n\n ```python\n df = df.assign(...=df.groupby(...)[...].transform(...))\n ```\n\n Now, this function can be method-chained:\n\n ```python\n import pandas as pd\n import janitor\n df = pd.DataFrame(...).groupby_agg(by='group',\n agg='mean',\n agg_column_name=\"col1\"\n new_column_name='col1_mean_by_group',\n dropna = True/False)\n ```\n\n Functional usage syntax:\n\n ```python\n\n import pandas as pd\n import janitor as jn\n\n jn.groupby_agg(\n df,\n by= column name/list of column names,\n agg=aggregation function,\n agg_column_name = col,\n new_column_name= new column name,\n dropna = True/False)\n ```\n\n Method chaining usage syntax:\n\n ```python\n\n df.groupby_agg(\n by= column name/list of column names,\n agg=aggregation function,\n agg_column_name = col,\n new_column_name= new column name,\n dropna = True/False)\n ```\n\n\n :param df: A pandas DataFrame.\n :param by: Column(s) to groupby on, either a `str` or\n a `list` of `str`\n :param new_column_name: Name of the aggregation output column.\n :param agg_column_name: Name of the column to aggregate over.\n :param agg: How to aggregate.\n :param dropna: Whether or not to include null values,\n if present in the `by` column(s). Default is True.\n :returns: A pandas DataFrame.\n \"\"\"\n\n return df.assign(\n **{\n new_column_name: df.groupby(by, dropna=dropna)[\n agg_column_name\n ].transform(agg)\n }\n )\n", "path": "janitor/functions/groupby_agg.py"}]} | 1,749 | 495 |
gh_patches_debug_48464 | rasdani/github-patches | git_diff | mirumee__ariadne-840 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update Starlette dependency to 0.19
Starlette 0.19 has been released; we should bump Ariadne's Starlette dependency to it before releasing 0.15.
</issue>
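For reference, a PEP 440 range that admits 0.17 through 0.19 while excluding 0.20 would look like the following in `install_requires` (the exact bounds are a maintainer decision, so treat these as assumptions):

```python
install_requires = [
    "graphql-core>=3.2.0,<3.3",
    "starlette>=0.17,<0.20",   # assumed range: allow 0.17-0.19, exclude 0.20
    "typing_extensions>=3.6.0",
]
```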
<code>
[start of setup.py]
1 #! /usr/bin/env python
2 import os
3 from setuptools import setup
4
5 CLASSIFIERS = [
6 "Development Status :: 4 - Beta",
7 "Intended Audience :: Developers",
8 "License :: OSI Approved :: BSD License",
9 "Operating System :: OS Independent",
10 "Programming Language :: Python",
11 "Programming Language :: Python :: 3.7",
12 "Programming Language :: Python :: 3.8",
13 "Programming Language :: Python :: 3.9",
14 "Programming Language :: Python :: 3.10",
15 "Topic :: Software Development :: Libraries :: Python Modules",
16 ]
17
18 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
19 with open(README_PATH, "r", encoding="utf8") as f:
20 README = f.read()
21
22 setup(
23 name="ariadne",
24 author="Mirumee Software",
25 author_email="[email protected]",
26 description="Ariadne is a Python library for implementing GraphQL servers.",
27 long_description=README,
28 long_description_content_type="text/markdown",
29 license="BSD",
30 version="0.15.0.dev5",
31 url="https://github.com/mirumee/ariadne",
32 packages=["ariadne"],
33 include_package_data=True,
34 install_requires=[
35 "graphql-core>=3.2.0,<3.3",
36 "starlette<0.19",
37 "typing_extensions>=3.6.0",
38 ],
39 extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
40 classifiers=CLASSIFIERS,
41 platforms=["any"],
42 zip_safe=False,
43 )
44
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
include_package_data=True,
install_requires=[
"graphql-core>=3.2.0,<3.3",
- "starlette<0.19",
+ "starlette>0.17<0.20",
"typing_extensions>=3.6.0",
],
extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.2.0,<3.3\",\n- \"starlette<0.19\",\n+ \"starlette>0.17<0.20\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n", "issue": "Update Starlette dependency to 0.19\nStarlette 0.19 has been released, we should bump Ariadne's version to it before releasing 0.15 \n", "before_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\", encoding=\"utf8\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.15.0.dev5\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.2.0,<3.3\",\n \"starlette<0.19\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,008 | 120 |
gh_patches_debug_6633 | rasdani/github-patches | git_diff | ESMCI__cime-544 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Get PFS test working.
PFS does not produce history files because it does not set HIST_N or HIST_OPTION. The test fails because it tries to move history files that aren't there.
</issue>
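Two directions follow from the description: either make the test produce history files by setting the variables the issue names, or stop asking the harness to move them. A sketch of the first option (the variable names come from the issue; the values, and whether this is the preferred fix at all, are assumptions):

```python
def run_phase(self):
    self._case.set_value("STOP_OPTION", "ndays")
    self._case.set_value("STOP_N", 20)
    self._case.set_value("REST_OPTION", "none")
    self._case.set_value("CONTINUE_RUN", False)
    self._case.set_value("HIST_OPTION", "ndays")  # assumed value
    self._case.set_value("HIST_N", 1)             # assumed value
    self._case.flush()

    logger.info("doing an 20 day initial test, no restarts written")
    self.run_indv()
```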
<code>
[start of utils/python/CIME/SystemTests/pfs.py]
1 """
2 CIME performance test This class inherits from SystemTestsCommon
3
4 20 days performance test, no restart files written
5 """
6
7 from CIME.XML.standard_module_setup import *
8 from CIME.SystemTests.system_tests_common import SystemTestsCommon
9
10 logger = logging.getLogger(__name__)
11
12 class PFS(SystemTestsCommon):
13
14 def __init__(self, case):
15 """
16 initialize an object interface to the PFS system test
17 """
18 SystemTestsCommon.__init__(self, case)
19
20 def run_phase(self):
21 self._case.set_value("STOP_OPTION", "ndays")
22 self._case.set_value("STOP_N", 20)
23 self._case.set_value("REST_OPTION","none")
24 self._case.set_value("CONTINUE_RUN", False)
25 self._case.flush()
26
27 logger.info("doing an 20 day initial test, no restarts written")
28 self.run_indv()
29
[end of utils/python/CIME/SystemTests/pfs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/python/CIME/SystemTests/pfs.py b/utils/python/CIME/SystemTests/pfs.py
--- a/utils/python/CIME/SystemTests/pfs.py
+++ b/utils/python/CIME/SystemTests/pfs.py
@@ -18,11 +18,5 @@
SystemTestsCommon.__init__(self, case)
def run_phase(self):
- self._case.set_value("STOP_OPTION", "ndays")
- self._case.set_value("STOP_N", 20)
- self._case.set_value("REST_OPTION","none")
- self._case.set_value("CONTINUE_RUN", False)
- self._case.flush()
-
logger.info("doing an 20 day initial test, no restarts written")
- self.run_indv()
+ self.run_indv(suffix=None)
| {"golden_diff": "diff --git a/utils/python/CIME/SystemTests/pfs.py b/utils/python/CIME/SystemTests/pfs.py\n--- a/utils/python/CIME/SystemTests/pfs.py\n+++ b/utils/python/CIME/SystemTests/pfs.py\n@@ -18,11 +18,5 @@\n SystemTestsCommon.__init__(self, case)\n \n def run_phase(self):\n- self._case.set_value(\"STOP_OPTION\", \"ndays\")\n- self._case.set_value(\"STOP_N\", 20)\n- self._case.set_value(\"REST_OPTION\",\"none\")\n- self._case.set_value(\"CONTINUE_RUN\", False)\n- self._case.flush()\n-\n logger.info(\"doing an 20 day initial test, no restarts written\")\n- self.run_indv()\n+ self.run_indv(suffix=None)\n", "issue": "Get PFS test working.\nPFS does not produce history files because it does not set HIST_N or HIST_OPTION. The test fails because it tries to move history files that aren't there.\n\n", "before_files": [{"content": "\"\"\"\nCIME performance test This class inherits from SystemTestsCommon\n\n20 days performance test, no restart files written\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.SystemTests.system_tests_common import SystemTestsCommon\n\nlogger = logging.getLogger(__name__)\n\nclass PFS(SystemTestsCommon):\n\n def __init__(self, case):\n \"\"\"\n initialize an object interface to the PFS system test\n \"\"\"\n SystemTestsCommon.__init__(self, case)\n\n def run_phase(self):\n self._case.set_value(\"STOP_OPTION\", \"ndays\")\n self._case.set_value(\"STOP_N\", 20)\n self._case.set_value(\"REST_OPTION\",\"none\")\n self._case.set_value(\"CONTINUE_RUN\", False)\n self._case.flush()\n\n logger.info(\"doing an 20 day initial test, no restarts written\")\n self.run_indv()\n", "path": "utils/python/CIME/SystemTests/pfs.py"}]} | 829 | 179 |
gh_patches_debug_4430 | rasdani/github-patches | git_diff | jupyter__docker-stacks-1412 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
We fail to push our multi-arch images
We successfully log in with `docker login`, but we fail to push images. I'm unsure why; we only get 401 Unauthorized and that's it.
### From [CI run that failed](https://github.com/jupyter/docker-stacks/runs/3090573663?check_suite_focus=true)
```
#36 exporting to image
#36 exporting manifest sha256:f4885e2ee0a2d6f4484aa75b005e585414a56981e31ed2343f8a9f2dee27d2bf done
#36 exporting config sha256:125cf7a049b119220a812d64eeae4ed7e3c3fd61e92aed6fcb118251165062ba done
#36 exporting manifest sha256:e6b8d5dbd2a1cb3022cd214946f2290ac65aed7fc3787fa8fbdbd655c344ac28 done
#36 exporting config sha256:4ae4f82b528de65a8f301eef09d66007865189d8b405fe2f65ea6d10c7994760 done
#36 exporting manifest list sha256:30be78018e77875a79b18949ec26fcc8760399e82da17aefb0281bb177f0c2a0 done
#36 pushing layers
#36 ...
#38 [auth] library/40b549a81031:pull,push token for registry-1.docker.io
#38 DONE 0.0s
#39 [auth] library/40b549a81031:pull,push token for registry-1.docker.io
#39 DONE 0.0s
#40 [auth] library/40b549a81031:pull,push token for registry-1.docker.io
#40 DONE 0.0s
#41 [auth] library/40b549a81031:pull,push token for registry-1.docker.io
#41 DONE 0.0s
#42 [auth] library/40b549a81031:pull,push token for registry-1.docker.io
#42 DONE 0.0s
#43 [auth] library/40b549a81031:pull,push token for registry-1.docker.io
#43 DONE 0.0s
#36 exporting to image
#36 pushing layers 0.4s done
#36 ERROR: authorization status: 401: authorization failed
------
> exporting to image:
------
error: failed to solve: authorization status: 401: authorization failed
make: *** [Makefile:191: push-multi/base-notebook] Error 1
make: Leaving directory '/home/runner/work/docker-stacks/docker-stacks/main'
Error: Process completed with exit code 2.
```
</issue>
<code>
[start of tagging/tag_image.py]
1 #!/usr/bin/env python3
2 # Copyright (c) Jupyter Development Team.
3 # Distributed under the terms of the Modified BSD License.
4 import argparse
5 import logging
6 from plumbum.cmd import docker
7 from .docker_runner import DockerRunner
8 from .get_taggers_and_manifests import get_taggers_and_manifests
9 from .github_set_env import github_set_env
10
11
12 logger = logging.getLogger(__name__)
13
14
15 def tag_image(short_image_name: str, owner: str) -> None:
16 """
17 Tags <owner>/<short_image_name>:latest with the tags reported by all taggers
18 for the given image.
19
20 Tags are in a GitHub Actions environment also saved to environment variables
21 in a format making it easy to append them.
22 """
23 logger.info(f"Tagging image: {short_image_name}")
24 taggers, _ = get_taggers_and_manifests(short_image_name)
25
26 image = f"{owner}/{short_image_name}:latest"
27
28 with DockerRunner(image) as container:
29 tags = []
30 for tagger in taggers:
31 tagger_name = tagger.__name__
32 tag_value = tagger.tag_value(container)
33 tags.append(tag_value)
34 logger.info(
35 f"Applying tag tagger_name: {tagger_name} tag_value: {tag_value}"
36 )
37 docker["tag", image, f"{owner}/{short_image_name}:{tag_value}"]()
38
39 if tags:
40 env_name = f'{short_image_name.replace("-", "_")}_EXTRA_TAG_ARGS'
41 docker_build_tag_args = "-t " + " -t ".join(tags)
42 github_set_env(env_name, docker_build_tag_args)
43
44
45 if __name__ == "__main__":
46 logging.basicConfig(level=logging.INFO)
47
48 arg_parser = argparse.ArgumentParser()
49 arg_parser.add_argument(
50 "--short-image-name",
51 required=True,
52 help="Short image name to apply tags for",
53 )
54 arg_parser.add_argument("--owner", required=True, help="Owner of the image")
55 args = arg_parser.parse_args()
56
57 tag_image(args.short_image_name, args.owner)
58
[end of tagging/tag_image.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tagging/tag_image.py b/tagging/tag_image.py
--- a/tagging/tag_image.py
+++ b/tagging/tag_image.py
@@ -38,7 +38,9 @@
if tags:
env_name = f'{short_image_name.replace("-", "_")}_EXTRA_TAG_ARGS'
- docker_build_tag_args = "-t " + " -t ".join(tags)
+ docker_build_tag_args = " ".join(
+ [f"-t {owner}/{short_image_name}:{tag}" for tag in tags]
+ )
github_set_env(env_name, docker_build_tag_args)
| {"golden_diff": "diff --git a/tagging/tag_image.py b/tagging/tag_image.py\n--- a/tagging/tag_image.py\n+++ b/tagging/tag_image.py\n@@ -38,7 +38,9 @@\n \n if tags:\n env_name = f'{short_image_name.replace(\"-\", \"_\")}_EXTRA_TAG_ARGS'\n- docker_build_tag_args = \"-t \" + \" -t \".join(tags)\n+ docker_build_tag_args = \" \".join(\n+ [f\"-t {owner}/{short_image_name}:{tag}\" for tag in tags]\n+ )\n github_set_env(env_name, docker_build_tag_args)\n", "issue": "We fail to push our multi-arch images\nWe successfully login with `docker login`, but we fail to push images. I'm unsure why, we only get 401 Unauthorized and that's it.\r\n\r\n### From [CI run that failed](https://github.com/jupyter/docker-stacks/runs/3090573663?check_suite_focus=true)\r\n\r\n```\r\n #36 exporting to image\r\n #36 exporting manifest sha256:f4885e2ee0a2d6f4484aa75b005e585414a56981e31ed2343f8a9f2dee27d2bf done\r\n #36 exporting config sha256:125cf7a049b119220a812d64eeae4ed7e3c3fd61e92aed6fcb118251165062ba done\r\n #36 exporting manifest sha256:e6b8d5dbd2a1cb3022cd214946f2290ac65aed7fc3787fa8fbdbd655c344ac28 done\r\n #36 exporting config sha256:4ae4f82b528de65a8f301eef09d66007865189d8b405fe2f65ea6d10c7994760 done\r\n #36 exporting manifest list sha256:30be78018e77875a79b18949ec26fcc8760399e82da17aefb0281bb177f0c2a0 done\r\n #36 pushing layers\r\n #36 ...\r\n \r\n #38 [auth] library/40b549a81031:pull,push token for registry-1.docker.io\r\n #38 DONE 0.0s\r\n \r\n #39 [auth] library/40b549a81031:pull,push token for registry-1.docker.io\r\n #39 DONE 0.0s\r\n \r\n #40 [auth] library/40b549a81031:pull,push token for registry-1.docker.io\r\n #40 DONE 0.0s\r\n \r\n #41 [auth] library/40b549a81031:pull,push token for registry-1.docker.io\r\n #41 DONE 0.0s\r\n \r\n #42 [auth] library/40b549a81031:pull,push token for registry-1.docker.io\r\n #42 DONE 0.0s\r\n \r\n #43 [auth] library/40b549a81031:pull,push token for registry-1.docker.io\r\n #43 DONE 0.0s\r\n \r\n #36 exporting to image\r\n #36 pushing layers 0.4s done\r\n #36 ERROR: authorization status: 401: authorization failed\r\n ------\r\n > exporting to image:\r\n ------\r\n error: failed to solve: authorization status: 401: authorization failed\r\n make: *** [Makefile:191: push-multi/base-notebook] Error 1\r\n make: Leaving directory '/home/runner/work/docker-stacks/docker-stacks/main'\r\n Error: Process completed with exit code 2.\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\nimport argparse\nimport logging\nfrom plumbum.cmd import docker\nfrom .docker_runner import DockerRunner\nfrom .get_taggers_and_manifests import get_taggers_and_manifests\nfrom .github_set_env import github_set_env\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef tag_image(short_image_name: str, owner: str) -> None:\n \"\"\"\n Tags <owner>/<short_image_name>:latest with the tags reported by all taggers\n for the given image.\n\n Tags are in a GitHub Actions environment also saved to environment variables\n in a format making it easy to append them.\n \"\"\"\n logger.info(f\"Tagging image: {short_image_name}\")\n taggers, _ = get_taggers_and_manifests(short_image_name)\n\n image = f\"{owner}/{short_image_name}:latest\"\n\n with DockerRunner(image) as container:\n tags = []\n for tagger in taggers:\n tagger_name = tagger.__name__\n tag_value = tagger.tag_value(container)\n tags.append(tag_value)\n logger.info(\n f\"Applying tag tagger_name: {tagger_name} tag_value: {tag_value}\"\n 
)\n docker[\"tag\", image, f\"{owner}/{short_image_name}:{tag_value}\"]()\n\n if tags:\n env_name = f'{short_image_name.replace(\"-\", \"_\")}_EXTRA_TAG_ARGS'\n docker_build_tag_args = \"-t \" + \" -t \".join(tags)\n github_set_env(env_name, docker_build_tag_args)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\n \"--short-image-name\",\n required=True,\n help=\"Short image name to apply tags for\",\n )\n arg_parser.add_argument(\"--owner\", required=True, help=\"Owner of the image\")\n args = arg_parser.parse_args()\n\n tag_image(args.short_image_name, args.owner)\n", "path": "tagging/tag_image.py"}]} | 1,924 | 133 |
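To make the fix above concrete, here is a minimal sketch of the tag-argument construction before and after the patch. The owner, image name, and tag values are invented for illustration; the `library/40b549a81031` entries in the CI log suggest, but do not prove, that the unqualified tags were being resolved against Docker Hub's default `library/` namespace.

```python
# Minimal reproduction of the tagging bug fixed above, using made-up values.
owner = "jupyter"
short_image_name = "base-notebook"
tags = ["latest", "python-3.9.5", "2021-07-28"]

# Old behaviour: bare tags with no owner/name; the CI log's "library/..."
# auth lines suggest these were then resolved against Docker Hub's default
# namespace, where pushes are denied.
old_args = "-t " + " -t ".join(tags)
print(old_args)
# -t latest -t python-3.9.5 -t 2021-07-28

# New behaviour: every -t argument is a fully qualified image reference.
new_args = " ".join(f"-t {owner}/{short_image_name}:{tag}" for tag in tags)
print(new_args)
# -t jupyter/base-notebook:latest -t jupyter/base-notebook:python-3.9.5 ...
```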
gh_patches_debug_19274 | rasdani/github-patches | git_diff | nautobot__nautobot-5223 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Custom field date-type objects AssertionError in GraphQL
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Nautobot version (Docker tag too if applicable): 1.5.17b1, also tested in 1.5.8
* Python version: 3.9
* Database platform, version: psql 13.10
* Middleware(s): None
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
1. Create a [custom field object](https://demo.nautobot.com/extras/custom-fields/last_backup/?tab=main) for the Device type template with the type as Date
2. Set a date on a [device](https://demo.nautobot.com/dcim/devices/4d7aecac-addd-4f3c-8efc-e102872b54e5/?tab=main) in YYYY-MM-DD iso8601 format
3. You have to restart nautobot-server; we may need a post_save signal event for this as well, which would be a separate issue if so
4. Using GraphQL attempt to retrieve the value of this field from a device
`
{
devices (name: "ams01-dist-01") {
name,
cf_last_backup
}
}
`
<!-- What did you expect to happen? -->
### Expected Behavior
Query to return the value of the Date
<!-- What happened instead? -->
### Observed Behavior
* GraphQL returns `"message": "Received not compatible date \"'2023-04-26'\""`
* stdout from nautobot-server gets an exception
`[27/Apr/2023 17:20:01] "POST /graphql/ HTTP/1.1" 200 4437611
Traceback (most recent call last):
File "/home/meganerd/.cache/pypoetry/virtualenvs/nautobot-Sgl-QJQc-py3.9/lib/python3.9/site-packages/graphql/execution/executor.py", line 480, in complete_value_catching_error
completed = complete_value(
File "/home/meganerd/.cache/pypoetry/virtualenvs/nautobot-Sgl-QJQc-py3.9/lib/python3.9/site-packages/graphql/execution/executor.py", line 563, in complete_value
return complete_leaf_value(return_type, path, result)
File "/home/meganerd/.cache/pypoetry/virtualenvs/nautobot-Sgl-QJQc-py3.9/lib/python3.9/site-packages/graphql/execution/executor.py", line 626, in complete_leaf_value
serialized_result = return_type.serialize(result)
File "/home/meganerd/.cache/pypoetry/virtualenvs/nautobot-Sgl-QJQc-py3.9/lib/python3.9/site-packages/graphene/types/datetime.py", line 23, in serialize
assert isinstance(
AssertionError: Received not compatible date "'2023-04-26'"`
</issue>
<code>
[start of nautobot/core/graphql/types.py]
1 import datetime
2
3 from django.contrib.contenttypes.models import ContentType
4 import graphene
5 import graphene_django_optimizer as gql_optimizer
6
7
8 class OptimizedNautobotObjectType(gql_optimizer.OptimizedDjangoObjectType):
9 url = graphene.String()
10
11 def resolve_url(self, info):
12 return self.get_absolute_url(api=True)
13
14 class Meta:
15 abstract = True
16
17
18 class ContentTypeType(OptimizedNautobotObjectType):
19 """
20 Graphene-Django object type for ContentType records.
21
22 Needed because ContentType is a built-in model, not one that we own and can auto-generate types for.
23 """
24
25 class Meta:
26 model = ContentType
27
28
29 class DateType(graphene.Date):
30 """
31 Overriding the default serialize method from https://github.com/graphql-python/graphene/blob/master/graphene/types/datetime.py
32 to handle the case where the date object is passed as a str object.
33 """
34
35 @staticmethod
36 def serialize(date):
37 if isinstance(date, datetime.datetime):
38 date = date.date()
39 return date.isoformat()
40 elif isinstance(date, str):
41 return date
42 else:
43 raise AssertionError(f'Received not compatible date "{date!r}"')
44
45
46 class JSON(graphene.Scalar):
47 @staticmethod
48 def serialize_data(dt):
49 return dt
50
51 serialize = serialize_data
52 parse_value = serialize_data
53 parse_literal = serialize_data
54
[end of nautobot/core/graphql/types.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nautobot/core/graphql/types.py b/nautobot/core/graphql/types.py
--- a/nautobot/core/graphql/types.py
+++ b/nautobot/core/graphql/types.py
@@ -3,6 +3,7 @@
from django.contrib.contenttypes.models import ContentType
import graphene
import graphene_django_optimizer as gql_optimizer
+from graphql import GraphQLError
class OptimizedNautobotObjectType(gql_optimizer.OptimizedDjangoObjectType):
@@ -37,10 +38,12 @@
if isinstance(date, datetime.datetime):
date = date.date()
return date.isoformat()
+ elif isinstance(date, datetime.date):
+ return date.isoformat()
elif isinstance(date, str):
return date
else:
- raise AssertionError(f'Received not compatible date "{date!r}"')
+ raise GraphQLError(f'Received not compatible date "{date!r}"')
class JSON(graphene.Scalar):
| {"golden_diff": "diff --git a/nautobot/core/graphql/types.py b/nautobot/core/graphql/types.py\n--- a/nautobot/core/graphql/types.py\n+++ b/nautobot/core/graphql/types.py\n@@ -3,6 +3,7 @@\n from django.contrib.contenttypes.models import ContentType\n import graphene\n import graphene_django_optimizer as gql_optimizer\n+from graphql import GraphQLError\n \n \n class OptimizedNautobotObjectType(gql_optimizer.OptimizedDjangoObjectType):\n@@ -37,10 +38,12 @@\n if isinstance(date, datetime.datetime):\n date = date.date()\n return date.isoformat()\n+ elif isinstance(date, datetime.date):\n+ return date.isoformat()\n elif isinstance(date, str):\n return date\n else:\n- raise AssertionError(f'Received not compatible date \"{date!r}\"')\n+ raise GraphQLError(f'Received not compatible date \"{date!r}\"')\n \n \n class JSON(graphene.Scalar):\n", "issue": "Custom field date-type objects AssertionError in GraphQL\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Nautobot version (Docker tag too if applicable): 1.5.17b1, also tested in 1.5.8\r\n* Python version: 3.9\r\n* Database platform, version: psql 13.10\r\n* Middleware(s): None\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Create a [custom field object](https://demo.nautobot.com/extras/custom-fields/last_backup/?tab=main) for the Device type template with the type as Date\r\n2. Set a date on a [device](https://demo.nautobot.com/dcim/devices/4d7aecac-addd-4f3c-8efc-e102872b54e5/?tab=main) in YYYY-MM-DD iso8601 format\r\n3. You have to restart nautobot-server; we may need a post_save signal event for this as well, which would be a separate issue if so\r\n4. Using GraphQL attempt to retrieve the value of this field from a device\r\n`\r\n{\r\n devices (name: \"ams01-dist-01\") {\r\n name,\r\n cf_last_backup\r\n }\r\n}\r\n`\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nQuery to return the value of the Date\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\n* GrapQL returns `\"message\": \"Received not compatible date \\\"'2023-04-26'\\\"\"`\r\n* stdout from nautobot-server gets an exception\r\n\r\n`[27/Apr/2023 17:20:01] \"POST /graphql/ HTTP/1.1\" 200 4437611\r\nTraceback (most recent call last):\r\n File \"/home/meganerd/.cache/pypoetry/virtualenvs/nautobot-Sgl-QJQc-py3.9/lib/python3.9/site-packages/graphql/execution/executor.py\", line 480, in complete_value_catching_error\r\n completed = complete_value(\r\n File \"/home/meganerd/.cache/pypoetry/virtualenvs/nautobot-Sgl-QJQc-py3.9/lib/python3.9/site-packages/graphql/execution/executor.py\", line 563, in complete_value\r\n return complete_leaf_value(return_type, path, result)\r\n File \"/home/meganerd/.cache/pypoetry/virtualenvs/nautobot-Sgl-QJQc-py3.9/lib/python3.9/site-packages/graphql/execution/executor.py\", line 626, in complete_leaf_value\r\n serialized_result = return_type.serialize(result)\r\n File \"/home/meganerd/.cache/pypoetry/virtualenvs/nautobot-Sgl-QJQc-py3.9/lib/python3.9/site-packages/graphene/types/datetime.py\", line 23, in serialize\r\n assert isinstance(\r\nAssertionError: Received not compatible date \"'2023-04-26'\"`\r\n\r\n\n", "before_files": [{"content": "import datetime\n\nfrom django.contrib.contenttypes.models import ContentType\nimport graphene\nimport graphene_django_optimizer as gql_optimizer\n\n\nclass OptimizedNautobotObjectType(gql_optimizer.OptimizedDjangoObjectType):\n url = graphene.String()\n\n def resolve_url(self, info):\n return self.get_absolute_url(api=True)\n\n class Meta:\n abstract = True\n\n\nclass ContentTypeType(OptimizedNautobotObjectType):\n \"\"\"\n Graphene-Django object type for ContentType records.\n\n Needed because ContentType is a built-in model, not one that we own and can auto-generate types for.\n \"\"\"\n\n class Meta:\n model = ContentType\n\n\nclass DateType(graphene.Date):\n \"\"\"\n Overriding the default serialize method from https://github.com/graphql-python/graphene/blob/master/graphene/types/datetime.py\n to handle the case where the date object is passed as a str object.\n \"\"\"\n\n @staticmethod\n def serialize(date):\n if isinstance(date, datetime.datetime):\n date = date.date()\n return date.isoformat()\n elif isinstance(date, str):\n return date\n else:\n raise AssertionError(f'Received not compatible date \"{date!r}\"')\n\n\nclass JSON(graphene.Scalar):\n @staticmethod\n def serialize_data(dt):\n return dt\n\n serialize = serialize_data\n parse_value = serialize_data\n parse_literal = serialize_data\n", "path": "nautobot/core/graphql/types.py"}]} | 1,809 | 204 |
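A stand-alone sketch of the serializer logic in the golden patch above. It uses only the standard library, substituting `ValueError` for Nautobot's `GraphQLError` so it runs without graphene installed — that substitution is the only liberty taken with the patched code.

```python
# Sketch of the serializer behaviour after the patch above.
import datetime


def serialize(date):
    # Patched logic: datetime and date are both normalised to ISO strings,
    # strings pass through, and anything else is rejected with an error a
    # GraphQL layer can surface cleanly (GraphQLError in Nautobot; ValueError
    # here as a stand-in so the sketch has no graphene dependency).
    if isinstance(date, datetime.datetime):  # must come before the date check
        return date.date().isoformat()
    elif isinstance(date, datetime.date):
        return date.isoformat()
    elif isinstance(date, str):
        return date
    raise ValueError(f'Received not compatible date "{date!r}"')


print(serialize(datetime.date(2023, 4, 26)))              # 2023-04-26 (crashed before)
print(serialize(datetime.datetime(2023, 4, 26, 12, 0)))   # 2023-04-26
print(serialize("2023-04-26"))                            # passes through unchanged
```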
gh_patches_debug_11157 | rasdani/github-patches | git_diff | kivy__python-for-android-800 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cryptography recipe does not compile
I'm trying to build a Kivy app with:
- sdl2 bootstrap
- recipes python2,kivy,cryptography
- buildozer 0.32
- latest python-for-android master branch
In the libffi recipe I had to replace [line](https://github.com/kivy/python-for-android/blob/master/pythonforandroid/recipes/libffi/__init__.py#L43) with:
`shprint(sh.Command('autoreconf'), '-vif', _env=env)`.
Without that change p4a was raising an sh.CommandNotFound error.
Now running `buildozer android_new debug` produces the following error log: http://pastebin.com/22qjBJwL
buildozer.spec file: http://pastebin.com/aLe7nLHG
main.py file: http://pastebin.com/xSReYe6u
</issue>
<code>
[start of pythonforandroid/recipes/libffi/__init__.py]
1 from pythonforandroid.recipe import Recipe
2 from pythonforandroid.logger import shprint
3 from pythonforandroid.util import current_directory
4 from os.path import exists, join
5 import sh
6 import glob
7
8
9 class LibffiRecipe(Recipe):
10 name = 'libffi'
11 version = 'v3.2.1'
12 url = 'https://github.com/atgreen/libffi/archive/{version}.zip'
13
14 patches = ['remove-version-info.patch']
15
16 def get_host(self, arch):
17 with current_directory(self.get_build_dir(arch.arch)):
18 host = None
19 with open('Makefile') as f:
20 for line in f:
21 if line.startswith('host = '):
22 host = line.strip()[7:]
23 break
24
25 if not host or not exists(host):
26 raise RuntimeError('failed to find build output! ({})'
27 .format(host))
28
29 return host
30
31 def should_build(self, arch):
32 # return not bool(glob.glob(join(self.ctx.get_libs_dir(arch.arch),
33 # 'libffi.so*')))
34 return not exists(join(self.ctx.get_libs_dir(arch.arch), 'libffi.so'))
35 # return not exists(join(self.ctx.get_python_install_dir(), 'lib',
36 # 'libffi.so'))
37
38 def build_arch(self, arch):
39 env = self.get_recipe_env(arch)
40 with current_directory(self.get_build_dir(arch.arch)):
41 if not exists('configure'):
42 shprint(sh.Command('./autogen.sh'), _env=env)
43 shprint(sh.Command('autoreconf -vif'), _env=env)
44 shprint(sh.Command('./configure'), '--host=' + arch.toolchain_prefix,
45 '--prefix=' + self.ctx.get_python_install_dir(),
46 '--enable-shared', _env=env)
47 shprint(sh.make, '-j5', 'libffi.la', _env=env)
48
49
50 # dlname = None
51 # with open(join(host, 'libffi.la')) as f:
52 # for line in f:
53 # if line.startswith('dlname='):
54 # dlname = line.strip()[8:-1]
55 # break
56 #
57 # if not dlname or not exists(join(host, '.libs', dlname)):
58 # raise RuntimeError('failed to locate shared object! ({})'
59 # .format(dlname))
60
61 # shprint(sh.sed, '-i', 's/^dlname=.*$/dlname=\'libffi.so\'/', join(host, 'libffi.la'))
62
63 shprint(sh.cp, '-t', self.ctx.get_libs_dir(arch.arch),
64 join(self.get_host(arch), '.libs', 'libffi.so')) #,
65 # join(host, 'libffi.la'))
66
67 def get_include_dirs(self, arch):
68 return [join(self.get_build_dir(arch.arch), self.get_host(arch), 'include')]
69
70
71 recipe = LibffiRecipe()
72
[end of pythonforandroid/recipes/libffi/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pythonforandroid/recipes/libffi/__init__.py b/pythonforandroid/recipes/libffi/__init__.py
--- a/pythonforandroid/recipes/libffi/__init__.py
+++ b/pythonforandroid/recipes/libffi/__init__.py
@@ -40,7 +40,7 @@
with current_directory(self.get_build_dir(arch.arch)):
if not exists('configure'):
shprint(sh.Command('./autogen.sh'), _env=env)
- shprint(sh.Command('autoreconf -vif'), _env=env)
+ shprint(sh.Command('autoreconf'), '-vif', _env=env)
shprint(sh.Command('./configure'), '--host=' + arch.toolchain_prefix,
'--prefix=' + self.ctx.get_python_install_dir(),
'--enable-shared', _env=env)
| {"golden_diff": "diff --git a/pythonforandroid/recipes/libffi/__init__.py b/pythonforandroid/recipes/libffi/__init__.py\n--- a/pythonforandroid/recipes/libffi/__init__.py\n+++ b/pythonforandroid/recipes/libffi/__init__.py\n@@ -40,7 +40,7 @@\n \t\twith current_directory(self.get_build_dir(arch.arch)):\n \t\t\tif not exists('configure'):\n \t\t\t\tshprint(sh.Command('./autogen.sh'), _env=env)\n-\t\t\tshprint(sh.Command('autoreconf -vif'), _env=env)\n+\t\t\tshprint(sh.Command('autoreconf'), '-vif', _env=env)\n \t\t\tshprint(sh.Command('./configure'), '--host=' + arch.toolchain_prefix,\n \t\t\t '--prefix=' + self.ctx.get_python_install_dir(),\n \t\t\t '--enable-shared', _env=env)\n", "issue": "Cryptography recipe does not compile\nI'm trying to build Kivy app with:\n- sdl2 bootstrap\n- recipes python2,kivy,cryptography \n- buildozer 0.32 \n- latest python-for-android master branch \n\nIn libffi recipe I had to replace [line](https://github.com/kivy/python-for-android/blob/master/pythonforandroid/recipes/libffi/__init__.py#L43) with:\n`shprint(sh.Command('autoreconf'), '-vif', _env=env)`.\nWithout that change p4a was raising sh.CommandNotFound error. \n\nNow running `buildozer android_new debug` produces the following error log: http://pastebin.com/22qjBJwL\nbuildozer.spec file: http://pastebin.com/aLe7nLHG\nmain.py file: http://pastebin.com/xSReYe6u\n\n", "before_files": [{"content": "from pythonforandroid.recipe import Recipe\nfrom pythonforandroid.logger import shprint\nfrom pythonforandroid.util import current_directory\nfrom os.path import exists, join\nimport sh\nimport glob\n\n\nclass LibffiRecipe(Recipe):\n\tname = 'libffi'\n\tversion = 'v3.2.1'\n\turl = 'https://github.com/atgreen/libffi/archive/{version}.zip'\n\n\tpatches = ['remove-version-info.patch']\n\n\tdef get_host(self, arch):\n\t\twith current_directory(self.get_build_dir(arch.arch)):\n\t\t\thost = None\n\t\t\twith open('Makefile') as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\tif line.startswith('host = '):\n\t\t\t\t\t\thost = line.strip()[7:]\n\t\t\t\t\t\tbreak\n\n\t\t\tif not host or not exists(host):\n\t\t\t\traise RuntimeError('failed to find build output! ({})'\n\t\t\t\t .format(host))\n\t\t\t\n\t\t\treturn host\n\n\tdef should_build(self, arch):\n\t\t# return not bool(glob.glob(join(self.ctx.get_libs_dir(arch.arch),\n\t\t# 'libffi.so*')))\n\t\treturn not exists(join(self.ctx.get_libs_dir(arch.arch), 'libffi.so'))\n\t\t# return not exists(join(self.ctx.get_python_install_dir(), 'lib',\n\t\t# 'libffi.so'))\n\n\tdef build_arch(self, arch):\n\t\tenv = self.get_recipe_env(arch)\n\t\twith current_directory(self.get_build_dir(arch.arch)):\n\t\t\tif not exists('configure'):\n\t\t\t\tshprint(sh.Command('./autogen.sh'), _env=env)\n\t\t\tshprint(sh.Command('autoreconf -vif'), _env=env)\n\t\t\tshprint(sh.Command('./configure'), '--host=' + arch.toolchain_prefix,\n\t\t\t '--prefix=' + self.ctx.get_python_install_dir(),\n\t\t\t '--enable-shared', _env=env)\n\t\t\tshprint(sh.make, '-j5', 'libffi.la', _env=env)\n\n\n\t\t\t# dlname = None\n\t\t\t# with open(join(host, 'libffi.la')) as f:\n\t\t\t# \tfor line in f:\n\t\t\t# \t\tif line.startswith('dlname='):\n\t\t\t# \t\t\tdlname = line.strip()[8:-1]\n\t\t\t# \t\t\tbreak\n\t\t\t# \n\t\t\t# if not dlname or not exists(join(host, '.libs', dlname)):\n\t\t\t# \traise RuntimeError('failed to locate shared object! 
({})'\n\t\t\t# \t .format(dlname))\n\n\t\t\t# shprint(sh.sed, '-i', 's/^dlname=.*$/dlname=\\'libffi.so\\'/', join(host, 'libffi.la'))\n\n\t\t\tshprint(sh.cp, '-t', self.ctx.get_libs_dir(arch.arch),\n\t\t\t join(self.get_host(arch), '.libs', 'libffi.so')) #,\n\t\t\t # join(host, 'libffi.la'))\n\n\tdef get_include_dirs(self, arch):\n\t\treturn [join(self.get_build_dir(arch.arch), self.get_host(arch), 'include')]\n\n\nrecipe = LibffiRecipe()\n", "path": "pythonforandroid/recipes/libffi/__init__.py"}]} | 1,526 | 183 |
gh_patches_debug_19803 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1678 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Braintree error when selecting address
I have had the following error for the past couple of months:
> fetch failed for source Braintree District Council: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py", line 45, in fetch id = next( ^^^^^ StopIteration
I am seeing address["value"] on line 43 having a length of 11.
Removing this line cures the problem for me, and I am not sure what this line is achieving.
https://github.com/mampfes/hacs_waste_collection_schedule/blob/73baeef472c5db4c85db3a7b725df68f3a15a101/custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py#L43
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py]
1 import requests
2 from bs4 import BeautifulSoup
3 from dateutil import parser
4 from waste_collection_schedule import Collection
5
6 TITLE = "Braintree District Council"
7 DESCRIPTION = "Braintree District Council, UK - Waste Collection"
8 URL = "https://www.braintree.gov.uk"
9 TEST_CASES = {
10 "30 Boars Tye Road": {"house_number": "30", "post_code": "CM8 3QE"},
11 "64 Silver Street": {"house_number": "64", "post_code": "CM8 3QG"},
12 "18 St Mary's Road": {"house_number": "1", "post_code": "CM8 3PE"},
13 "20 Peel Crescent": {"house_number": "20", "post_code": "CM7 2RS"},
14 }
15
16 ICON_MAP = {
17 "Grey Bin": "mdi:trash-can",
18 "Clear Sack": "mdi:recycle",
19 "Green Bin": "mdi:leaf",
20 "Food Bin": "mdi:food-apple",
21 }
22
23
24 class Source:
25 def __init__(self, post_code: str, house_number: str):
26 self.post_code = post_code
27 self.house_number = house_number
28 self.url = f"{URL}/xfp/form/554"
29 self.form_data = {
30 "qe15dda0155d237d1ea161004d1839e3369ed4831_0_0": (None, post_code),
31 "page": (None, 5730),
32 }
33
34 def fetch(self):
35 address_lookup = requests.post(
36 "https://www.braintree.gov.uk/xfp/form/554", files=self.form_data
37 )
38 address_lookup.raise_for_status()
39 addresses = {}
40 for address in BeautifulSoup(address_lookup.text, "html.parser").find_all(
41 "option"
42 ):
43 if len(address["value"]) == 12:
44 addresses[address["value"]] = address.text.strip()
45 id = next(
46 address
47 for address in addresses
48 if addresses[address].startswith(self.house_number)
49 )
50 self.form_data["qe15dda0155d237d1ea161004d1839e3369ed4831_1_0"] = (None, id)
51 self.form_data["next"] = (None, "Next")
52 collection_lookup = requests.post(
53 "https://www.braintree.gov.uk/xfp/form/554", files=self.form_data
54 )
55 collection_lookup.raise_for_status()
56 entries = []
57 for results in BeautifulSoup(collection_lookup.text, "html.parser").find_all(
58 "div", class_="date_display"
59 ):
60 try:
61 collection_type, collection_date = results.text.strip().split("\n")
62 entries.append(
63 Collection(
64 date=parser.parse(collection_date, dayfirst=True).date(),
65 t=collection_type,
66 icon=ICON_MAP.get(collection_type),
67 )
68 )
69 except (StopIteration, TypeError):
70 pass
71 return entries
72
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py
@@ -1,7 +1,7 @@
import requests
from bs4 import BeautifulSoup
from dateutil import parser
-from waste_collection_schedule import Collection
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
TITLE = "Braintree District Council"
DESCRIPTION = "Braintree District Council, UK - Waste Collection"
@@ -40,7 +40,7 @@
for address in BeautifulSoup(address_lookup.text, "html.parser").find_all(
"option"
):
- if len(address["value"]) == 12:
+ if len(address["value"]) > 5: # Skip the first option
addresses[address["value"]] = address.text.strip()
id = next(
address
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py\n@@ -1,7 +1,7 @@\n import requests\n from bs4 import BeautifulSoup\n from dateutil import parser\n-from waste_collection_schedule import Collection\n+from waste_collection_schedule import Collection # type: ignore[attr-defined]\n \n TITLE = \"Braintree District Council\"\n DESCRIPTION = \"Braintree District Council, UK - Waste Collection\"\n@@ -40,7 +40,7 @@\n for address in BeautifulSoup(address_lookup.text, \"html.parser\").find_all(\n \"option\"\n ):\n- if len(address[\"value\"]) == 12:\n+ if len(address[\"value\"]) > 5: # Skip the first option\n addresses[address[\"value\"]] = address.text.strip()\n id = next(\n address\n", "issue": "Braintree error when selecting address\nI have had the follow error for the past couple of months:\r\n\r\n> fetch failed for source Braintree District Council: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py\", line 45, in fetch id = next( ^^^^^ StopIteration\r\n\r\nI am seeing address[\"value\"] on line 43 having a length of 11.\r\nRemoving this line cures the problem for me, and I am not sure what this line is achieving?\r\n\r\nhttps://github.com/mampfes/hacs_waste_collection_schedule/blob/73baeef472c5db4c85db3a7b725df68f3a15a101/custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py#L43\n", "before_files": [{"content": "import requests\nfrom bs4 import BeautifulSoup\nfrom dateutil import parser\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Braintree District Council\"\nDESCRIPTION = \"Braintree District Council, UK - Waste Collection\"\nURL = \"https://www.braintree.gov.uk\"\nTEST_CASES = {\n \"30 Boars Tye Road\": {\"house_number\": \"30\", \"post_code\": \"CM8 3QE\"},\n \"64 Silver Street\": {\"house_number\": \"64\", \"post_code\": \"CM8 3QG\"},\n \"18 St Mary's Road\": {\"house_number\": \"1\", \"post_code\": \"CM8 3PE\"},\n \"20 Peel Crescent\": {\"house_number\": \"20\", \"post_code\": \"CM7 2RS\"},\n}\n\nICON_MAP = {\n \"Grey Bin\": \"mdi:trash-can\",\n \"Clear Sack\": \"mdi:recycle\",\n \"Green Bin\": \"mdi:leaf\",\n \"Food Bin\": \"mdi:food-apple\",\n}\n\n\nclass Source:\n def __init__(self, post_code: str, house_number: str):\n self.post_code = post_code\n self.house_number = house_number\n self.url = f\"{URL}/xfp/form/554\"\n self.form_data = {\n \"qe15dda0155d237d1ea161004d1839e3369ed4831_0_0\": (None, post_code),\n \"page\": (None, 5730),\n }\n\n def fetch(self):\n address_lookup = requests.post(\n \"https://www.braintree.gov.uk/xfp/form/554\", files=self.form_data\n )\n address_lookup.raise_for_status()\n addresses = {}\n for address in BeautifulSoup(address_lookup.text, \"html.parser\").find_all(\n \"option\"\n ):\n if len(address[\"value\"]) == 12:\n addresses[address[\"value\"]] = address.text.strip()\n id = next(\n address\n for address in addresses\n if 
addresses[address].startswith(self.house_number)\n )\n self.form_data[\"qe15dda0155d237d1ea161004d1839e3369ed4831_1_0\"] = (None, id)\n self.form_data[\"next\"] = (None, \"Next\")\n collection_lookup = requests.post(\n \"https://www.braintree.gov.uk/xfp/form/554\", files=self.form_data\n )\n collection_lookup.raise_for_status()\n entries = []\n for results in BeautifulSoup(collection_lookup.text, \"html.parser\").find_all(\n \"div\", class_=\"date_display\"\n ):\n try:\n collection_type, collection_date = results.text.strip().split(\"\\n\")\n entries.append(\n Collection(\n date=parser.parse(collection_date, dayfirst=True).date(),\n t=collection_type,\n icon=ICON_MAP.get(collection_type),\n )\n )\n except (StopIteration, TypeError):\n pass\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/braintree_gov_uk.py"}]} | 1,615 | 246 |
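To see why relaxing the length check fixes the `StopIteration`, here is a stand-alone sketch of the `<option>` filtering from the golden patch. The HTML and the 11/12-character ID values are invented to mirror the council form described in the issue; only the street names are taken from the module's `TEST_CASES`. It assumes BeautifulSoup is installed.

```python
# Stand-alone illustration of the <option> filtering fixed above.
from bs4 import BeautifulSoup

html = """
<select>
  <option value="">Select your address...</option>
  <option value="10001234567">30 Boars Tye Road</option>
  <option value="100012345678">64 Silver Street</option>
</select>
"""

addresses = {}
for option in BeautifulSoup(html, "html.parser").find_all("option"):
    # The old check `len(...) == 12` silently dropped 11-character IDs, so
    # the later next(...) raised StopIteration; `> 5` keeps every real
    # address while still skipping the empty placeholder value.
    if len(option["value"]) > 5:
        addresses[option["value"]] = option.text.strip()

print(addresses)
# {'10001234567': '30 Boars Tye Road', '100012345678': '64 Silver Street'}
```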
gh_patches_debug_22741 | rasdani/github-patches | git_diff | aio-libs__aiohttp-5364 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Switch http_parser to llhttp
Node.js decided to move from their hard-coded C parser to this one.
https://github.com/nodejs/http-parser/pull/285#issuecomment-456025694
*Upd:*
The proposed parser is https://llhttp.org
</issue>
<code>
[start of setup.py]
1 import os
2 import pathlib
3 import re
4 import sys
5
6 from setuptools import Extension, setup
7
8 if sys.version_info < (3, 7):
9 raise RuntimeError("aiohttp 4.x requires Python 3.7+")
10
11
12 NO_EXTENSIONS = bool(os.environ.get("AIOHTTP_NO_EXTENSIONS")) # type: bool
13 HERE = pathlib.Path(__file__).parent
14 IS_GIT_REPO = (HERE / ".git").exists()
15
16
17 if sys.implementation.name != "cpython":
18 NO_EXTENSIONS = True
19
20
21 if IS_GIT_REPO and not (HERE / "vendor/http-parser/README.md").exists():
22 print("Install submodules when building from git clone", file=sys.stderr)
23 print("Hint:", file=sys.stderr)
24 print(" git submodule update --init", file=sys.stderr)
25 sys.exit(2)
26
27
28 # NOTE: makefile cythonizes all Cython modules
29
30 extensions = [
31 Extension("aiohttp._websocket", ["aiohttp/_websocket.c"]),
32 Extension(
33 "aiohttp._http_parser",
34 [
35 "aiohttp/_http_parser.c",
36 "vendor/http-parser/http_parser.c",
37 "aiohttp/_find_header.c",
38 ],
39 define_macros=[("HTTP_PARSER_STRICT", 0)],
40 ),
41 Extension("aiohttp._helpers", ["aiohttp/_helpers.c"]),
42 Extension("aiohttp._http_writer", ["aiohttp/_http_writer.c"]),
43 ]
44
45
46 txt = (HERE / "aiohttp" / "__init__.py").read_text("utf-8")
47 try:
48 version = re.findall(r'^__version__ = "([^"]+)"\r?$', txt, re.M)[0]
49 except IndexError:
50 raise RuntimeError("Unable to determine version.")
51
52 install_requires = [
53 "chardet>=2.0,<5.0",
54 "multidict>=4.5,<7.0",
55 "async_timeout>=4.0a2,<5.0",
56 'asynctest==0.13.0; python_version<"3.8"',
57 "yarl>=1.0,<2.0",
58 "typing_extensions>=3.7.4",
59 "frozenlist>=1.1.1",
60 "aiosignal>=1.1.2",
61 ]
62
63
64 def read(f):
65 return (HERE / f).read_text("utf-8").strip()
66
67
68 args = dict(
69 name="aiohttp",
70 version=version,
71 description="Async http client/server framework (asyncio)",
72 long_description="\n\n".join((read("README.rst"), read("CHANGES.rst"))),
73 long_description_content_type="text/x-rst",
74 classifiers=[
75 "License :: OSI Approved :: Apache Software License",
76 "Intended Audience :: Developers",
77 "Programming Language :: Python",
78 "Programming Language :: Python :: 3",
79 "Programming Language :: Python :: 3.7",
80 "Programming Language :: Python :: 3.8",
81 "Programming Language :: Python :: 3.9",
82 "Development Status :: 5 - Production/Stable",
83 "Operating System :: POSIX",
84 "Operating System :: MacOS :: MacOS X",
85 "Operating System :: Microsoft :: Windows",
86 "Topic :: Internet :: WWW/HTTP",
87 "Framework :: AsyncIO",
88 ],
89 author="Nikolay Kim",
90 author_email="[email protected]",
91 maintainer=", ".join(
92 (
93 "Nikolay Kim <[email protected]>",
94 "Andrew Svetlov <[email protected]>",
95 )
96 ),
97 maintainer_email="[email protected]",
98 url="https://github.com/aio-libs/aiohttp",
99 project_urls={
100 "Chat: Gitter": "https://gitter.im/aio-libs/Lobby",
101 "CI: GitHub Actions": "https://github.com/aio-libs/aiohttp/actions?query=workflow%3ACI", # noqa
102 "Coverage: codecov": "https://codecov.io/github/aio-libs/aiohttp",
103 "Docs: Changelog": "https://docs.aiohttp.org/en/stable/changes.html",
104 "Docs: RTD": "https://docs.aiohttp.org",
105 "GitHub: issues": "https://github.com/aio-libs/aiohttp/issues",
106 "GitHub: repo": "https://github.com/aio-libs/aiohttp",
107 },
108 license="Apache 2",
109 packages=["aiohttp"],
110 python_requires=">=3.7",
111 install_requires=install_requires,
112 extras_require={
113 "speedups": [
114 "aiodns>=1.1",
115 "Brotli",
116 "cchardet",
117 ],
118 },
119 include_package_data=True,
120 )
121
122 if not NO_EXTENSIONS:
123 print("*********************")
124 print("* Accelerated build *")
125 print("*********************")
126 setup(ext_modules=extensions, **args)
127 else:
128 print("*********************")
129 print("* Pure Python build *")
130 print("*********************")
131 setup(**args)
132
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
NO_EXTENSIONS = True
-if IS_GIT_REPO and not (HERE / "vendor/http-parser/README.md").exists():
+if IS_GIT_REPO and not (HERE / "vendor/llhttp/README.md").exists():
print("Install submodules when building from git clone", file=sys.stderr)
print("Hint:", file=sys.stderr)
print(" git submodule update --init", file=sys.stderr)
@@ -33,10 +33,13 @@
"aiohttp._http_parser",
[
"aiohttp/_http_parser.c",
- "vendor/http-parser/http_parser.c",
"aiohttp/_find_header.c",
+ "vendor/llhttp/build/c/llhttp.c",
+ "vendor/llhttp/src/native/api.c",
+ "vendor/llhttp/src/native/http.c",
],
- define_macros=[("HTTP_PARSER_STRICT", 0)],
+ define_macros=[("LLHTTP_STRICT_MODE", 0)],
+ include_dirs=["vendor/llhttp/build"],
),
Extension("aiohttp._helpers", ["aiohttp/_helpers.c"]),
Extension("aiohttp._http_writer", ["aiohttp/_http_writer.c"]),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n NO_EXTENSIONS = True\n \n \n-if IS_GIT_REPO and not (HERE / \"vendor/http-parser/README.md\").exists():\n+if IS_GIT_REPO and not (HERE / \"vendor/llhttp/README.md\").exists():\n print(\"Install submodules when building from git clone\", file=sys.stderr)\n print(\"Hint:\", file=sys.stderr)\n print(\" git submodule update --init\", file=sys.stderr)\n@@ -33,10 +33,13 @@\n \"aiohttp._http_parser\",\n [\n \"aiohttp/_http_parser.c\",\n- \"vendor/http-parser/http_parser.c\",\n \"aiohttp/_find_header.c\",\n+ \"vendor/llhttp/build/c/llhttp.c\",\n+ \"vendor/llhttp/src/native/api.c\",\n+ \"vendor/llhttp/src/native/http.c\",\n ],\n- define_macros=[(\"HTTP_PARSER_STRICT\", 0)],\n+ define_macros=[(\"LLHTTP_STRICT_MODE\", 0)],\n+ include_dirs=[\"vendor/llhttp/build\"],\n ),\n Extension(\"aiohttp._helpers\", [\"aiohttp/_helpers.c\"]),\n Extension(\"aiohttp._http_writer\", [\"aiohttp/_http_writer.c\"]),\n", "issue": "Switch http_parser to llhttp\nNodejs decide to move from their hard-coded C parser to this one.\r\n\r\nhttps://github.com/nodejs/http-parser/pull/285#issuecomment-456025694\r\n\r\n*Upd:* \r\nproposed parser is https://llhttp.org\r\n\n", "before_files": [{"content": "import os\nimport pathlib\nimport re\nimport sys\n\nfrom setuptools import Extension, setup\n\nif sys.version_info < (3, 7):\n raise RuntimeError(\"aiohttp 4.x requires Python 3.7+\")\n\n\nNO_EXTENSIONS = bool(os.environ.get(\"AIOHTTP_NO_EXTENSIONS\")) # type: bool\nHERE = pathlib.Path(__file__).parent\nIS_GIT_REPO = (HERE / \".git\").exists()\n\n\nif sys.implementation.name != \"cpython\":\n NO_EXTENSIONS = True\n\n\nif IS_GIT_REPO and not (HERE / \"vendor/http-parser/README.md\").exists():\n print(\"Install submodules when building from git clone\", file=sys.stderr)\n print(\"Hint:\", file=sys.stderr)\n print(\" git submodule update --init\", file=sys.stderr)\n sys.exit(2)\n\n\n# NOTE: makefile cythonizes all Cython modules\n\nextensions = [\n Extension(\"aiohttp._websocket\", [\"aiohttp/_websocket.c\"]),\n Extension(\n \"aiohttp._http_parser\",\n [\n \"aiohttp/_http_parser.c\",\n \"vendor/http-parser/http_parser.c\",\n \"aiohttp/_find_header.c\",\n ],\n define_macros=[(\"HTTP_PARSER_STRICT\", 0)],\n ),\n Extension(\"aiohttp._helpers\", [\"aiohttp/_helpers.c\"]),\n Extension(\"aiohttp._http_writer\", [\"aiohttp/_http_writer.c\"]),\n]\n\n\ntxt = (HERE / \"aiohttp\" / \"__init__.py\").read_text(\"utf-8\")\ntry:\n version = re.findall(r'^__version__ = \"([^\"]+)\"\\r?$', txt, re.M)[0]\nexcept IndexError:\n raise RuntimeError(\"Unable to determine version.\")\n\ninstall_requires = [\n \"chardet>=2.0,<5.0\",\n \"multidict>=4.5,<7.0\",\n \"async_timeout>=4.0a2,<5.0\",\n 'asynctest==0.13.0; python_version<\"3.8\"',\n \"yarl>=1.0,<2.0\",\n \"typing_extensions>=3.7.4\",\n \"frozenlist>=1.1.1\",\n \"aiosignal>=1.1.2\",\n]\n\n\ndef read(f):\n return (HERE / f).read_text(\"utf-8\").strip()\n\n\nargs = dict(\n name=\"aiohttp\",\n version=version,\n description=\"Async http client/server framework (asyncio)\",\n long_description=\"\\n\\n\".join((read(\"README.rst\"), read(\"CHANGES.rst\"))),\n long_description_content_type=\"text/x-rst\",\n classifiers=[\n \"License :: OSI Approved :: Apache Software License\",\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming 
Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Operating System :: POSIX\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Framework :: AsyncIO\",\n ],\n author=\"Nikolay Kim\",\n author_email=\"[email protected]\",\n maintainer=\", \".join(\n (\n \"Nikolay Kim <[email protected]>\",\n \"Andrew Svetlov <[email protected]>\",\n )\n ),\n maintainer_email=\"[email protected]\",\n url=\"https://github.com/aio-libs/aiohttp\",\n project_urls={\n \"Chat: Gitter\": \"https://gitter.im/aio-libs/Lobby\",\n \"CI: GitHub Actions\": \"https://github.com/aio-libs/aiohttp/actions?query=workflow%3ACI\", # noqa\n \"Coverage: codecov\": \"https://codecov.io/github/aio-libs/aiohttp\",\n \"Docs: Changelog\": \"https://docs.aiohttp.org/en/stable/changes.html\",\n \"Docs: RTD\": \"https://docs.aiohttp.org\",\n \"GitHub: issues\": \"https://github.com/aio-libs/aiohttp/issues\",\n \"GitHub: repo\": \"https://github.com/aio-libs/aiohttp\",\n },\n license=\"Apache 2\",\n packages=[\"aiohttp\"],\n python_requires=\">=3.7\",\n install_requires=install_requires,\n extras_require={\n \"speedups\": [\n \"aiodns>=1.1\",\n \"Brotli\",\n \"cchardet\",\n ],\n },\n include_package_data=True,\n)\n\nif not NO_EXTENSIONS:\n print(\"*********************\")\n print(\"* Accelerated build *\")\n print(\"*********************\")\n setup(ext_modules=extensions, **args)\nelse:\n print(\"*********************\")\n print(\"* Pure Python build *\")\n print(\"*********************\")\n setup(**args)\n", "path": "setup.py"}]} | 1,987 | 294 |
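The golden patch above is pure build configuration, so the corresponding sketch is just the patched `Extension` in isolation. The source paths assume the `llhttp` submodule has been checked out and its C output generated under `vendor/llhttp/build`, as the patched `setup.py` requires; constructing the `Extension` object itself needs no files, so this snippet runs as-is.

```python
# Minimal sketch of the build-config change above: llhttp's sources and the
# LLHTTP_STRICT_MODE macro replace the old single-file http-parser.
from setuptools import Extension

http_parser_ext = Extension(
    "aiohttp._http_parser",
    sources=[
        "aiohttp/_http_parser.c",
        "aiohttp/_find_header.c",
        "vendor/llhttp/build/c/llhttp.c",
        "vendor/llhttp/src/native/api.c",
        "vendor/llhttp/src/native/http.c",
    ],
    define_macros=[("LLHTTP_STRICT_MODE", 0)],
    include_dirs=["vendor/llhttp/build"],
)

print(http_parser_ext.name, http_parser_ext.define_macros)
```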
gh_patches_debug_5390 | rasdani/github-patches | git_diff | scalableminds__webknossos-libs-47 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tiled cubing doesn't convert last slice
Tiled cubing doesn't convert last slice
</issue>
<code>
[start of wkcuber/utils.py]
1 import time
2 import wkw
3 import numpy as np
4 import logging
5 from glob import iglob
6 from collections import namedtuple
7 from multiprocessing import cpu_count, Lock
8 from concurrent.futures import ProcessPoolExecutor
9 from os import path
10 from platform import python_version
11 from math import floor, ceil
12
13
14 from .knossos import KnossosDataset, CUBE_EDGE_LEN
15
16
17 WkwDatasetInfo = namedtuple(
18 "WkwDatasetInfo", ("dataset_path", "layer_name", "dtype", "mag")
19 )
20 KnossosDatasetInfo = namedtuple("KnossosDatasetInfo", ("dataset_path", "dtype"))
21
22
23 def _open_wkw(info, **kwargs):
24 header = wkw.Header(np.dtype(info.dtype), **kwargs)
25 ds = wkw.Dataset.open(
26 path.join(info.dataset_path, info.layer_name, str(info.mag)), header
27 )
28 return ds
29
30
31 def open_wkw(info, lock=None, **kwargs):
32 if lock is None:
33 # Create dummy lock
34 lock = Lock()
35
36 with lock:
37 return _open_wkw(info, **kwargs)
38
39
40 def open_knossos(info):
41 return KnossosDataset.open(info.dataset_path, np.dtype(info.dtype))
42
43
44 def add_verbose_flag(parser):
45 parser.add_argument(
46 "--verbose", "-v", help="Verbose output", dest="verbose", action="store_true"
47 )
48
49 parser.set_defaults(verbose=False)
50
51
52 def find_files(source_path, extensions):
53 # Find all files with a matching file extension
54 return (
55 f
56 for f in iglob(source_path, recursive=True)
57 if any([f.endswith(suffix) for suffix in extensions])
58 )
59
60
61 def get_chunks(arr, chunk_size):
62 for i in range(0, len(arr), chunk_size):
63 yield arr[i : i + chunk_size]
64
65
66 def get_regular_chunks(min_z, max_z, chunk_size):
67 i = floor(min_z / chunk_size) * chunk_size
68 while i < ceil(max_z / chunk_size) * chunk_size:
69 yield range(i, i + chunk_size)
70 i += chunk_size
71
72
73 def add_jobs_flag(parser):
74 parser.add_argument(
75 "--jobs", "-j", help="Parallel jobs", type=int, default=cpu_count()
76 )
77
78
79 def pool_init(lock):
80 global process_pool_lock
81 process_pool_lock = lock
82
83
84 def pool_get_lock():
85 global process_pool_lock
86 try:
87 return process_pool_lock
88 except NameError:
89 return None
90
91
92 class ParallelExecutor:
93 def __init__(self, jobs):
94 self.lock = Lock()
95 if python_version() >= "3.7.0":
96 self.exec = ProcessPoolExecutor(
97 jobs, initializer=pool_init, initargs=(self.lock,)
98 )
99 else:
100 self.exec = ProcessPoolExecutor(jobs)
101 self.futures = []
102
103 def submit(self, fn, *args):
104 future = self.exec.submit(fn, *args)
105 self.futures.append(future)
106 return future
107
108 def __enter__(self):
109 self.exec.__enter__()
110 return self
111
112 def __exit__(self, type, value, tb):
113 [f.result() for f in self.futures]
114 self.exec.__exit__(type, value, tb)
115
116
117 times = {}
118
119
120 def time_start(identifier):
121 times[identifier] = time.time()
122
123
124 def time_stop(identifier):
125 _time = times.pop(identifier)
126 logging.debug("{} took {:.8f}s".format(identifier, time.time() - _time))
127
[end of wkcuber/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wkcuber/utils.py b/wkcuber/utils.py
--- a/wkcuber/utils.py
+++ b/wkcuber/utils.py
@@ -63,9 +63,10 @@
yield arr[i : i + chunk_size]
+# min_z and max_z are both inclusive
def get_regular_chunks(min_z, max_z, chunk_size):
i = floor(min_z / chunk_size) * chunk_size
- while i < ceil(max_z / chunk_size) * chunk_size:
+ while i < ceil((max_z + 1) / chunk_size) * chunk_size:
yield range(i, i + chunk_size)
i += chunk_size
| {"golden_diff": "diff --git a/wkcuber/utils.py b/wkcuber/utils.py\n--- a/wkcuber/utils.py\n+++ b/wkcuber/utils.py\n@@ -63,9 +63,10 @@\n yield arr[i : i + chunk_size]\n \n \n+# min_z and max_z are both inclusive\n def get_regular_chunks(min_z, max_z, chunk_size):\n i = floor(min_z / chunk_size) * chunk_size\n- while i < ceil(max_z / chunk_size) * chunk_size:\n+ while i < ceil((max_z + 1) / chunk_size) * chunk_size:\n yield range(i, i + chunk_size)\n i += chunk_size\n", "issue": "Tiled cubing doesn't convert last slice\n\nTiled cubing doesn't convert last slice\n\n", "before_files": [{"content": "import time\nimport wkw\nimport numpy as np\nimport logging\nfrom glob import iglob\nfrom collections import namedtuple\nfrom multiprocessing import cpu_count, Lock\nfrom concurrent.futures import ProcessPoolExecutor\nfrom os import path\nfrom platform import python_version\nfrom math import floor, ceil\n\n\nfrom .knossos import KnossosDataset, CUBE_EDGE_LEN\n\n\nWkwDatasetInfo = namedtuple(\n \"WkwDatasetInfo\", (\"dataset_path\", \"layer_name\", \"dtype\", \"mag\")\n)\nKnossosDatasetInfo = namedtuple(\"KnossosDatasetInfo\", (\"dataset_path\", \"dtype\"))\n\n\ndef _open_wkw(info, **kwargs):\n header = wkw.Header(np.dtype(info.dtype), **kwargs)\n ds = wkw.Dataset.open(\n path.join(info.dataset_path, info.layer_name, str(info.mag)), header\n )\n return ds\n\n\ndef open_wkw(info, lock=None, **kwargs):\n if lock is None:\n # Create dummy lock\n lock = Lock()\n\n with lock:\n return _open_wkw(info, **kwargs)\n\n\ndef open_knossos(info):\n return KnossosDataset.open(info.dataset_path, np.dtype(info.dtype))\n\n\ndef add_verbose_flag(parser):\n parser.add_argument(\n \"--verbose\", \"-v\", help=\"Verbose output\", dest=\"verbose\", action=\"store_true\"\n )\n\n parser.set_defaults(verbose=False)\n\n\ndef find_files(source_path, extensions):\n # Find all files with a matching file extension\n return (\n f\n for f in iglob(source_path, recursive=True)\n if any([f.endswith(suffix) for suffix in extensions])\n )\n\n\ndef get_chunks(arr, chunk_size):\n for i in range(0, len(arr), chunk_size):\n yield arr[i : i + chunk_size]\n\n\ndef get_regular_chunks(min_z, max_z, chunk_size):\n i = floor(min_z / chunk_size) * chunk_size\n while i < ceil(max_z / chunk_size) * chunk_size:\n yield range(i, i + chunk_size)\n i += chunk_size\n\n\ndef add_jobs_flag(parser):\n parser.add_argument(\n \"--jobs\", \"-j\", help=\"Parallel jobs\", type=int, default=cpu_count()\n )\n\n\ndef pool_init(lock):\n global process_pool_lock\n process_pool_lock = lock\n\n\ndef pool_get_lock():\n global process_pool_lock\n try:\n return process_pool_lock\n except NameError:\n return None\n\n\nclass ParallelExecutor:\n def __init__(self, jobs):\n self.lock = Lock()\n if python_version() >= \"3.7.0\":\n self.exec = ProcessPoolExecutor(\n jobs, initializer=pool_init, initargs=(self.lock,)\n )\n else:\n self.exec = ProcessPoolExecutor(jobs)\n self.futures = []\n\n def submit(self, fn, *args):\n future = self.exec.submit(fn, *args)\n self.futures.append(future)\n return future\n\n def __enter__(self):\n self.exec.__enter__()\n return self\n\n def __exit__(self, type, value, tb):\n [f.result() for f in self.futures]\n self.exec.__exit__(type, value, tb)\n\n\ntimes = {}\n\n\ndef time_start(identifier):\n times[identifier] = time.time()\n\n\ndef time_stop(identifier):\n _time = times.pop(identifier)\n logging.debug(\"{} took {:.8f}s\".format(identifier, time.time() - _time))\n", "path": "wkcuber/utils.py"}]} | 1,591 | 147 |
gh_patches_debug_41984 | rasdani/github-patches | git_diff | Mailu__Mailu-769 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Setup: generated files missing
Traefik is ending up in a looped redirect when trying to download the generated files. Flask blueprint context to be modified to simplify the forwarding and eliminate the need for redirects.
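A minimal sketch of one possible approach implied above: register the same view functions on both a version-prefixed blueprint and a root blueprint, so the generated files are served directly instead of via a redirect (blueprint names here are illustrative, not the final fix):
```python
import flask

app = flask.Flask(__name__)

prefix_bp = flask.Blueprint("v1", __name__)
root_bp = flask.Blueprint("root", __name__)

# The same handler is reachable with and without the version prefix,
# so Traefik never has to follow a redirect to fetch a generated file.
@prefix_bp.route("/file/<uid>/<filepath>")
@root_bp.route("/file/<uid>/<filepath>")
def file(uid, filepath):
    return flask.Response("rendered file contents", mimetype="application/text")

app.register_blueprint(prefix_bp, url_prefix="/v1")
app.register_blueprint(root_bp)
```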
</issue>
<code>
[start of setup/server.py]
1 import flask
2 import flask_bootstrap
3 import redis
4 import json
5 import os
6 import jinja2
7 import uuid
8 import string
9 import random
10 import ipaddress
11
12
13 app = flask.Flask(__name__)
14 flask_bootstrap.Bootstrap(app)
15 db = redis.StrictRedis(host='redis', port=6379, db=0)
16
17
18 def render_flavor(flavor, template, data):
19 return flask.render_template(
20 os.path.join(flavor, template),
21 **data
22 )
23
24
25 @app.add_template_global
26 def secret(length=16):
27 charset = string.ascii_uppercase + string.digits
28 return ''.join(
29 random.SystemRandom().choice(charset)
30 for _ in range(length)
31 )
32
33
34 def build_app(path):
35
36 app.jinja_env.trim_blocks = True
37 app.jinja_env.lstrip_blocks = True
38
39 @app.context_processor
40 def app_context():
41 return dict(versions=os.getenv("VERSIONS","master").split(','))
42
43 version = os.getenv("this_version")
44
45 bp = flask.Blueprint(version, __name__)
46 bp.jinja_loader = jinja2.ChoiceLoader([
47 jinja2.FileSystemLoader(os.path.join(path, "templates")),
48 jinja2.FileSystemLoader(os.path.join(path, "flavors"))
49 ])
50
51 @bp.context_processor
52 def bp_context(version=version):
53 return dict(version=version)
54
55 @bp.route("/")
56 def wizard():
57 return flask.render_template('wizard.html')
58
59 @bp.route("/submit_flavor", methods=["POST"])
60 def submit_flavor():
61 data = flask.request.form.copy()
62 steps = sorted(os.listdir(os.path.join(path, "templates", "steps", data["flavor"])))
63 return flask.render_template('wizard.html', flavor=data["flavor"], steps=steps)
64
65 @bp.route("/submit", methods=["POST"])
66 def submit():
67 data = flask.request.form.copy()
68 data['uid'] = str(uuid.uuid4())
69 data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])
70 db.set(data['uid'], json.dumps(data))
71 return flask.redirect(flask.url_for('.setup', uid=data['uid']))
72
73 @bp.route("/setup/<uid>", methods=["GET"])
74 def setup(uid):
75 data = json.loads(db.get(uid))
76 flavor = data.get("flavor", "compose")
77 rendered = render_flavor(flavor, "setup.html", data)
78 return flask.render_template("setup.html", contents=rendered)
79
80 @bp.route("/file/<uid>/<filepath>", methods=["GET"])
81 def file(uid, filepath):
82 data = json.loads(db.get(uid))
83 flavor = data.get("flavor", "compose")
84 return flask.Response(
85 render_flavor(flavor, filepath, data),
86 mimetype="application/text"
87 )
88
89 app.register_blueprint(bp, url_prefix="/{}".format(version))
90
91
92 if __name__ == "__main__":
93 build_app("/tmp/mailutest")
94 app.run(debug=True)
95
[end of setup/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup/server.py b/setup/server.py
--- a/setup/server.py
+++ b/setup/server.py
@@ -10,7 +10,9 @@
import ipaddress
-app = flask.Flask(__name__)
+version = os.getenv("this_version")
+static_url_path = "/" + version + "/static"
+app = flask.Flask(__name__, static_url_path=static_url_path)
flask_bootstrap.Bootstrap(app)
db = redis.StrictRedis(host='redis', port=6379, db=0)
@@ -40,29 +42,37 @@
def app_context():
return dict(versions=os.getenv("VERSIONS","master").split(','))
- version = os.getenv("this_version")
+ prefix_bp = flask.Blueprint(version, __name__)
+ prefix_bp.jinja_loader = jinja2.ChoiceLoader([
+ jinja2.FileSystemLoader(os.path.join(path, "templates")),
+ jinja2.FileSystemLoader(os.path.join(path, "flavors"))
+ ])
- bp = flask.Blueprint(version, __name__)
- bp.jinja_loader = jinja2.ChoiceLoader([
+ root_bp = flask.Blueprint("root", __name__)
+ root_bp.jinja_loader = jinja2.ChoiceLoader([
jinja2.FileSystemLoader(os.path.join(path, "templates")),
jinja2.FileSystemLoader(os.path.join(path, "flavors"))
])
- @bp.context_processor
+ @prefix_bp.context_processor
+ @root_bp.context_processor
def bp_context(version=version):
return dict(version=version)
- @bp.route("/")
+ @prefix_bp.route("/")
+ @root_bp.route("/")
def wizard():
return flask.render_template('wizard.html')
- @bp.route("/submit_flavor", methods=["POST"])
+ @prefix_bp.route("/submit_flavor", methods=["POST"])
+ @root_bp.route("/submit_flavor", methods=["POST"])
def submit_flavor():
data = flask.request.form.copy()
steps = sorted(os.listdir(os.path.join(path, "templates", "steps", data["flavor"])))
return flask.render_template('wizard.html', flavor=data["flavor"], steps=steps)
- @bp.route("/submit", methods=["POST"])
+ @prefix_bp.route("/submit", methods=["POST"])
+ @root_bp.route("/submit", methods=["POST"])
def submit():
data = flask.request.form.copy()
data['uid'] = str(uuid.uuid4())
@@ -70,14 +80,16 @@
db.set(data['uid'], json.dumps(data))
return flask.redirect(flask.url_for('.setup', uid=data['uid']))
- @bp.route("/setup/<uid>", methods=["GET"])
+ @prefix_bp.route("/setup/<uid>", methods=["GET"])
+ @root_bp.route("/setup/<uid>", methods=["GET"])
def setup(uid):
data = json.loads(db.get(uid))
flavor = data.get("flavor", "compose")
rendered = render_flavor(flavor, "setup.html", data)
return flask.render_template("setup.html", contents=rendered)
- @bp.route("/file/<uid>/<filepath>", methods=["GET"])
+ @prefix_bp.route("/file/<uid>/<filepath>", methods=["GET"])
+ @root_bp.route("/file/<uid>/<filepath>", methods=["GET"])
def file(uid, filepath):
data = json.loads(db.get(uid))
flavor = data.get("flavor", "compose")
@@ -86,7 +98,8 @@
mimetype="application/text"
)
- app.register_blueprint(bp, url_prefix="/{}".format(version))
+ app.register_blueprint(prefix_bp, url_prefix="/{}".format(version))
+ app.register_blueprint(root_bp)
if __name__ == "__main__":
| {"golden_diff": "diff --git a/setup/server.py b/setup/server.py\n--- a/setup/server.py\n+++ b/setup/server.py\n@@ -10,7 +10,9 @@\n import ipaddress\n \n \n-app = flask.Flask(__name__)\n+version = os.getenv(\"this_version\")\n+static_url_path = \"/\" + version + \"/static\"\n+app = flask.Flask(__name__, static_url_path=static_url_path)\n flask_bootstrap.Bootstrap(app)\n db = redis.StrictRedis(host='redis', port=6379, db=0)\n \n@@ -40,29 +42,37 @@\n def app_context():\n return dict(versions=os.getenv(\"VERSIONS\",\"master\").split(','))\n \n- version = os.getenv(\"this_version\")\n+ prefix_bp = flask.Blueprint(version, __name__)\n+ prefix_bp.jinja_loader = jinja2.ChoiceLoader([\n+ jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n+ jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n+ ])\n \n- bp = flask.Blueprint(version, __name__)\n- bp.jinja_loader = jinja2.ChoiceLoader([\n+ root_bp = flask.Blueprint(\"root\", __name__)\n+ root_bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n \n- @bp.context_processor\n+ @prefix_bp.context_processor\n+ @root_bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n \n- @bp.route(\"/\")\n+ @prefix_bp.route(\"/\")\n+ @root_bp.route(\"/\")\n def wizard():\n return flask.render_template('wizard.html')\n \n- @bp.route(\"/submit_flavor\", methods=[\"POST\"])\n+ @prefix_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n+ @root_bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps)\n \n- @bp.route(\"/submit\", methods=[\"POST\"])\n+ @prefix_bp.route(\"/submit\", methods=[\"POST\"])\n+ @root_bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n@@ -70,14 +80,16 @@\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n \n- @bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n+ @prefix_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n+ @root_bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n \n- @bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n+ @prefix_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n+ @root_bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n@@ -86,7 +98,8 @@\n mimetype=\"application/text\"\n )\n \n- app.register_blueprint(bp, url_prefix=\"/{}\".format(version))\n+ app.register_blueprint(prefix_bp, url_prefix=\"/{}\".format(version))\n+ app.register_blueprint(root_bp)\n \n \n if __name__ == \"__main__\":\n", "issue": "Setup: generated files missing\nTraefik is ending up in a looped redirect when trying to download the generated files. 
Flask blueprint context to be modified to simplify the forwarding and eliminate the need for redirects.\n", "before_files": [{"content": "import flask\nimport flask_bootstrap\nimport redis\nimport json\nimport os\nimport jinja2\nimport uuid\nimport string\nimport random\nimport ipaddress\n\n\napp = flask.Flask(__name__)\nflask_bootstrap.Bootstrap(app)\ndb = redis.StrictRedis(host='redis', port=6379, db=0)\n\n\ndef render_flavor(flavor, template, data):\n return flask.render_template(\n os.path.join(flavor, template),\n **data\n )\n\n\[email protected]_template_global\ndef secret(length=16):\n charset = string.ascii_uppercase + string.digits\n return ''.join(\n random.SystemRandom().choice(charset)\n for _ in range(length)\n )\n\n\ndef build_app(path):\n\n app.jinja_env.trim_blocks = True\n app.jinja_env.lstrip_blocks = True\n\n @app.context_processor\n def app_context():\n return dict(versions=os.getenv(\"VERSIONS\",\"master\").split(','))\n\n version = os.getenv(\"this_version\")\n\n bp = flask.Blueprint(version, __name__)\n bp.jinja_loader = jinja2.ChoiceLoader([\n jinja2.FileSystemLoader(os.path.join(path, \"templates\")),\n jinja2.FileSystemLoader(os.path.join(path, \"flavors\"))\n ])\n\n @bp.context_processor\n def bp_context(version=version):\n return dict(version=version)\n\n @bp.route(\"/\")\n def wizard():\n return flask.render_template('wizard.html')\n\n @bp.route(\"/submit_flavor\", methods=[\"POST\"])\n def submit_flavor():\n data = flask.request.form.copy()\n steps = sorted(os.listdir(os.path.join(path, \"templates\", \"steps\", data[\"flavor\"])))\n return flask.render_template('wizard.html', flavor=data[\"flavor\"], steps=steps)\n\n @bp.route(\"/submit\", methods=[\"POST\"])\n def submit():\n data = flask.request.form.copy()\n data['uid'] = str(uuid.uuid4())\n data['dns'] = str(ipaddress.IPv4Network(data['subnet'])[-2])\n db.set(data['uid'], json.dumps(data))\n return flask.redirect(flask.url_for('.setup', uid=data['uid']))\n\n @bp.route(\"/setup/<uid>\", methods=[\"GET\"])\n def setup(uid):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n rendered = render_flavor(flavor, \"setup.html\", data)\n return flask.render_template(\"setup.html\", contents=rendered)\n\n @bp.route(\"/file/<uid>/<filepath>\", methods=[\"GET\"])\n def file(uid, filepath):\n data = json.loads(db.get(uid))\n flavor = data.get(\"flavor\", \"compose\")\n return flask.Response(\n render_flavor(flavor, filepath, data),\n mimetype=\"application/text\"\n )\n\n app.register_blueprint(bp, url_prefix=\"/{}\".format(version))\n\n\nif __name__ == \"__main__\":\n build_app(\"/tmp/mailutest\")\n app.run(debug=True)\n", "path": "setup/server.py"}]} | 1,401 | 833 |
gh_patches_debug_16544 | rasdani/github-patches | git_diff | e-valuation__EvaP-1428 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Contributors missing in editor evaluation edit form
When editing an evaluation as an editor, contributors who have already been added (by managers) and who are marked as inactive or proxy users are not shown in the form anymore because they are not included in the contributor queryset. This leads to errors when saving the form.
The queryset should always include those people already added as contributors for this evaluation.
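A sketch of the kind of queryset this implies, using the field names from the form (the exact attribute for the currently assigned contributor is an assumption):
```python
from django.db.models import Q
from evap.evaluation.models import UserProfile

def contributor_queryset(existing_contributor_pk):
    # Keep active, non-proxy users selectable, but always include the
    # contributor who is already assigned so the form can still be saved.
    return UserProfile.objects.filter(
        (Q(is_active=True) & Q(is_proxy_user=False)) | Q(pk=existing_contributor_pk)
    )
```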
</issue>
<code>
[start of evap/contributor/forms.py]
1 from datetime import datetime, timedelta
2 import logging
3
4 from django import forms
5 from django.conf import settings
6 from django.db.models import Q
7 from django.forms.widgets import CheckboxSelectMultiple
8 from django.utils.translation import ugettext_lazy as _
9 from evap.evaluation.forms import UserModelMultipleChoiceField, UserModelChoiceField
10 from evap.evaluation.models import Course, Evaluation, Questionnaire, UserProfile
11 from evap.evaluation.tools import date_to_datetime
12 from evap.staff.forms import ContributionForm
13
14 logger = logging.getLogger(__name__)
15
16
17 class EvaluationForm(forms.ModelForm):
18 general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_("General questionnaires"))
19 course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())
20 name_de_field = forms.CharField(label=_("Name (German)"), disabled=True, required=False)
21 name_en_field = forms.CharField(label=_("Name (English)"), disabled=True, required=False)
22
23 class Meta:
24 model = Evaluation
25 fields = ('name_de_field', 'name_en_field', 'vote_start_datetime', 'vote_end_date', 'general_questionnaires', 'course')
26
27 def __init__(self, *args, **kwargs):
28 super().__init__(*args, **kwargs)
29
30 self.fields['name_de_field'].initial = self.instance.full_name_de
31 self.fields['name_en_field'].initial = self.instance.full_name_en
32
33 self.fields['general_questionnaires'].queryset = Questionnaire.objects.general_questionnaires().filter(
34 Q(visibility=Questionnaire.EDITORS) | Q(contributions__evaluation=self.instance)).distinct()
35
36 self.fields['vote_start_datetime'].localize = True
37 self.fields['vote_end_date'].localize = True
38
39 if self.instance.general_contribution:
40 self.fields['general_questionnaires'].initial = [q.pk for q in self.instance.general_contribution.questionnaires.all()]
41
42 def clean(self):
43 super().clean()
44
45 vote_start_datetime = self.cleaned_data.get('vote_start_datetime')
46 vote_end_date = self.cleaned_data.get('vote_end_date')
47 if vote_start_datetime and vote_end_date:
48 if vote_start_datetime.date() > vote_end_date:
49 self.add_error("vote_start_datetime", "")
50 self.add_error("vote_end_date", _("The first day of evaluation must be before the last one."))
51
52 def clean_vote_end_date(self):
53 vote_end_date = self.cleaned_data.get('vote_end_date')
54
55 # The actual deadline is EVALUATION_END_OFFSET_HOURS:00 AM of the day after vote_end_date.
56 # Therefore an evaluation date 24h + EVALUATION_END_OFFSET_HOURS in the past would technically still be in the future.
57 if vote_end_date and date_to_datetime(vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS) < datetime.now():
58 raise forms.ValidationError(_("The last day of evaluation must be in the future."))
59 return vote_end_date
60
61 def save(self, *args, **kw):
62 evaluation = super().save(*args, **kw)
63 evaluation.general_contribution.questionnaires.set(self.cleaned_data.get('general_questionnaires'))
64 return evaluation
65
66
67 class EditorContributionForm(ContributionForm):
68 def __init__(self, *args, **kwargs):
69 super().__init__(*args, **kwargs)
70
71 self.fields['questionnaires'].queryset = Questionnaire.objects.contributor_questionnaires().filter(
72 Q(visibility=Questionnaire.EDITORS) | Q(contributions__evaluation=self.evaluation)).distinct()
73 self.fields['contributor'].queryset = UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True)
74
75
76 class DelegatesForm(forms.ModelForm):
77 delegates = UserModelMultipleChoiceField(queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True),
78 required=False)
79
80 class Meta:
81 model = UserProfile
82 fields = ('delegates',)
83 field_classes = {
84 'delegates': UserModelMultipleChoiceField,
85 }
86
87 def __init__(self, *args, **kwargs):
88 super().__init__(*args, **kwargs)
89
90 def save(self, *args, **kw):
91 super().save(*args, **kw)
92 logger.info('User "{}" edited the settings.'.format(self.instance.username))
93
94
95 class DelegateSelectionForm(forms.Form):
96 delegate_to = UserModelChoiceField(label=_("Delegate to"),
97 queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True))
98
[end of evap/contributor/forms.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/contributor/forms.py b/evap/contributor/forms.py
--- a/evap/contributor/forms.py
+++ b/evap/contributor/forms.py
@@ -68,9 +68,13 @@
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
+ existing_contributor_pk = self.instance.contributor.pk if self.instance.contributor else None
+
self.fields['questionnaires'].queryset = Questionnaire.objects.contributor_questionnaires().filter(
Q(visibility=Questionnaire.EDITORS) | Q(contributions__evaluation=self.evaluation)).distinct()
- self.fields['contributor'].queryset = UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True)
+ self.fields['contributor'].queryset = UserProfile.objects.filter(
+ (Q(is_active=True) & Q(is_proxy_user=False)) | Q(pk=existing_contributor_pk)
+ )
class DelegatesForm(forms.ModelForm):
| {"golden_diff": "diff --git a/evap/contributor/forms.py b/evap/contributor/forms.py\n--- a/evap/contributor/forms.py\n+++ b/evap/contributor/forms.py\n@@ -68,9 +68,13 @@\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n \n+ existing_contributor_pk = self.instance.contributor.pk if self.instance.contributor else None\n+\n self.fields['questionnaires'].queryset = Questionnaire.objects.contributor_questionnaires().filter(\n Q(visibility=Questionnaire.EDITORS) | Q(contributions__evaluation=self.evaluation)).distinct()\n- self.fields['contributor'].queryset = UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True)\n+ self.fields['contributor'].queryset = UserProfile.objects.filter(\n+ (Q(is_active=True) & Q(is_proxy_user=False)) | Q(pk=existing_contributor_pk)\n+ )\n \n \n class DelegatesForm(forms.ModelForm):\n", "issue": "Contributors missing in editor evaluation edit form\nWhen editing an evaluation as an editor, contributors who have already been added (by managers) and who are marked as inactive or proxy users are not shown in the form anymore because they are not included in the contributor queryset. This leads to errors when saving the form.\r\nThe queryset should always include those people already added as contributors for this evaluation.\n", "before_files": [{"content": "from datetime import datetime, timedelta\nimport logging\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.forms.widgets import CheckboxSelectMultiple\nfrom django.utils.translation import ugettext_lazy as _\nfrom evap.evaluation.forms import UserModelMultipleChoiceField, UserModelChoiceField\nfrom evap.evaluation.models import Course, Evaluation, Questionnaire, UserProfile\nfrom evap.evaluation.tools import date_to_datetime\nfrom evap.staff.forms import ContributionForm\n\nlogger = logging.getLogger(__name__)\n\n\nclass EvaluationForm(forms.ModelForm):\n general_questionnaires = forms.ModelMultipleChoiceField(queryset=None, widget=CheckboxSelectMultiple, label=_(\"General questionnaires\"))\n course = forms.ModelChoiceField(Course.objects.all(), disabled=True, required=False, widget=forms.HiddenInput())\n name_de_field = forms.CharField(label=_(\"Name (German)\"), disabled=True, required=False)\n name_en_field = forms.CharField(label=_(\"Name (English)\"), disabled=True, required=False)\n\n class Meta:\n model = Evaluation\n fields = ('name_de_field', 'name_en_field', 'vote_start_datetime', 'vote_end_date', 'general_questionnaires', 'course')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['name_de_field'].initial = self.instance.full_name_de\n self.fields['name_en_field'].initial = self.instance.full_name_en\n\n self.fields['general_questionnaires'].queryset = Questionnaire.objects.general_questionnaires().filter(\n Q(visibility=Questionnaire.EDITORS) | Q(contributions__evaluation=self.instance)).distinct()\n\n self.fields['vote_start_datetime'].localize = True\n self.fields['vote_end_date'].localize = True\n\n if self.instance.general_contribution:\n self.fields['general_questionnaires'].initial = [q.pk for q in self.instance.general_contribution.questionnaires.all()]\n\n def clean(self):\n super().clean()\n\n vote_start_datetime = self.cleaned_data.get('vote_start_datetime')\n vote_end_date = self.cleaned_data.get('vote_end_date')\n if vote_start_datetime and vote_end_date:\n if vote_start_datetime.date() > vote_end_date:\n self.add_error(\"vote_start_datetime\", 
\"\")\n self.add_error(\"vote_end_date\", _(\"The first day of evaluation must be before the last one.\"))\n\n def clean_vote_end_date(self):\n vote_end_date = self.cleaned_data.get('vote_end_date')\n\n # The actual deadline is EVALUATION_END_OFFSET_HOURS:00 AM of the day after vote_end_date.\n # Therefore an evaluation date 24h + EVALUATION_END_OFFSET_HOURS in the past would technically still be in the future.\n if vote_end_date and date_to_datetime(vote_end_date) + timedelta(hours=24 + settings.EVALUATION_END_OFFSET_HOURS) < datetime.now():\n raise forms.ValidationError(_(\"The last day of evaluation must be in the future.\"))\n return vote_end_date\n\n def save(self, *args, **kw):\n evaluation = super().save(*args, **kw)\n evaluation.general_contribution.questionnaires.set(self.cleaned_data.get('general_questionnaires'))\n return evaluation\n\n\nclass EditorContributionForm(ContributionForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.fields['questionnaires'].queryset = Questionnaire.objects.contributor_questionnaires().filter(\n Q(visibility=Questionnaire.EDITORS) | Q(contributions__evaluation=self.evaluation)).distinct()\n self.fields['contributor'].queryset = UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True)\n\n\nclass DelegatesForm(forms.ModelForm):\n delegates = UserModelMultipleChoiceField(queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True),\n required=False)\n\n class Meta:\n model = UserProfile\n fields = ('delegates',)\n field_classes = {\n 'delegates': UserModelMultipleChoiceField,\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def save(self, *args, **kw):\n super().save(*args, **kw)\n logger.info('User \"{}\" edited the settings.'.format(self.instance.username))\n\n\nclass DelegateSelectionForm(forms.Form):\n delegate_to = UserModelChoiceField(label=_(\"Delegate to\"),\n queryset=UserProfile.objects.exclude(is_active=False).exclude(is_proxy_user=True))\n", "path": "evap/contributor/forms.py"}]} | 1,760 | 221 |
gh_patches_debug_5473 | rasdani/github-patches | git_diff | napari__napari-1250 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
working on fixing tests
## 🐛 Bug
here are some crazy observations that started from looking into the test failure in #923, and may have some implications for general test fixes that we want to look into in #921. I'll patch it in a PR soon, but just want to document this somewhere, in case it becomes pertinent to #921
on a mac (but we observe it in linux tests too), with python 3.7, using pyqt5 (I'm on 5.14.1), running this code:
```python
import numpy as np
import pytest # <-- this unused import is required for the bug!
import napari
with napari.gui_qt():
data = np.random.random((10, 15, 20))
viewer = napari.view_image(data)
viewer = napari.view_image(data)
viewer.dims.ndisplay = 3
viewer.dims.ndisplay = 2
```
leads to an error deep in vispy that culminates in:
```
lib/python3.7/site-packages/vispy/gloo/glir.py in _validate(self)
1299 gl.GL_VALIDATE_STATUS):
1300 raise RuntimeError('Program validation error:\n%s'
-> 1301 % gl.glGetProgramInfoLog(self._handle))
1302
1303 def _post_draw(self):
RuntimeError: Program validation error:
```
observations:
- only happens with python 3.7; python 3.6 & 3.8 are fine
- only happens with pyqt5, not pyside2
- only happens with the **double** viewer instantiation (two calls to `view_image`), assigning both viewers to the same variable name. If you name the second viewer `viewer2`, it's fine
- only happens when you switch from `ndisplay=` 2, then to 3, back to 2
- the error occurs only once per session. running the same code a second time works fine.
- pytest is key. if you comment out the `import pytest` line, you don't get the error...
- i thought it might have something to do with our adding of 'viewer' to console globals... but removing the `self.console = QtConsole({'viewer': self.viewer})` from our `QtConsole` instantiation doesn't change anything.
</issue>
<code>
[start of napari/__init__.py]
1 from ._version import get_versions
2
3 __version__ = get_versions()['version']
4 del get_versions
5
6 import os
7 from distutils.version import StrictVersion
8 from pathlib import Path
9 from qtpy import API_NAME
10 from ._version import get_versions
11
12 # putting up higher due to circular imports if plugin exceptions are raised
13 # on startup (we need to be able to show the napari version in the traceback.)
14 __version__ = get_versions()['version']
15 del get_versions
16
17
18 if API_NAME == 'PySide2':
19 # Set plugin path appropriately if using PySide2. This is a bug fix
20 # for when both PyQt5 and Pyside2 are installed
21 import PySide2
22
23 os.environ['QT_PLUGIN_PATH'] = str(
24 Path(PySide2.__file__).parent / 'Qt' / 'plugins'
25 )
26
27 from qtpy import QtCore
28
29 # When QT is not the specific version, we raise a warning:
30 from warnings import warn
31
32 if StrictVersion(QtCore.__version__) < StrictVersion('5.12.3'):
33 warn_message = f"""
34 napari was tested with QT library `>=5.12.3`.
35 The version installed is {QtCore.__version__}. Please report any issues with this
36 specific QT version at https://github.com/Napari/napari/issues.
37 """
38 warn(message=warn_message)
39
40 from vispy import app
41 import logging
42
43 # set vispy application to the appropriate qt backend
44 app.use_app(API_NAME)
45 del app
46 # set vispy logger to show warning and errors only
47 vispy_logger = logging.getLogger('vispy')
48 vispy_logger.setLevel(logging.WARNING)
49
50 from .viewer import Viewer
51 from .plugins.io import save_layers
52
53 # Note that importing _viewer_key_bindings is needed as the Viewer gets
54 # decorated with keybindings during that process, but it is not directly needed
55 # by our users and so is deleted below
56 from . import _viewer_key_bindings # noqa: F401
57 from .view_layers import (
58 view_path,
59 view_image,
60 view_labels,
61 view_surface,
62 view_shapes,
63 view_points,
64 view_vectors,
65 )
66 from ._qt import gui_qt
67 from .utils import sys_info, _magicgui
68
69 # register napari object types with magicgui if it is installed
70 _magicgui.register_types_with_magicgui()
71
72
73 # this unused import is here to fix a very strange bug.
74 # there is some mysterious magical goodness in scipy stats that needs
75 # to be imported early.
76 # see: https://github.com/napari/napari/issues/925
77 from scipy import stats # noqa: F401
78
79 del _magicgui
80 del stats
81 del _viewer_key_bindings
82
[end of napari/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/napari/__init__.py b/napari/__init__.py
--- a/napari/__init__.py
+++ b/napari/__init__.py
@@ -69,13 +69,5 @@
# register napari object types with magicgui if it is installed
_magicgui.register_types_with_magicgui()
-
-# this unused import is here to fix a very strange bug.
-# there is some mysterious magical goodness in scipy stats that needs
-# to be imported early.
-# see: https://github.com/napari/napari/issues/925
-from scipy import stats # noqa: F401
-
del _magicgui
-del stats
del _viewer_key_bindings
| {"golden_diff": "diff --git a/napari/__init__.py b/napari/__init__.py\n--- a/napari/__init__.py\n+++ b/napari/__init__.py\n@@ -69,13 +69,5 @@\n # register napari object types with magicgui if it is installed\n _magicgui.register_types_with_magicgui()\n \n-\n-# this unused import is here to fix a very strange bug.\n-# there is some mysterious magical goodness in scipy stats that needs\n-# to be imported early.\n-# see: https://github.com/napari/napari/issues/925\n-from scipy import stats # noqa: F401\n-\n del _magicgui\n-del stats\n del _viewer_key_bindings\n", "issue": "working on fixing tests\n## \ud83d\udc1b Bug\r\n\r\nhere are some crazy observations that started from looking into the test failure in #923, and may have some implications for general test fixes that we want to look into in #921. I'll patch it in a PR soon, but just want to document this somewhere, in case it becomes pertinent to #921\r\n\r\non a mac (but we observe it in linux tests too), with python 3.7, using pyqt5 (I'm on 5.14.1), running this code:\r\n```python\r\nimport numpy as np\r\nimport pytest # <-- this unused import is required for the bug!\r\nimport napari\r\n\r\nwith napari.gui_qt():\r\n data = np.random.random((10, 15, 20))\r\n viewer = napari.view_image(data)\r\n viewer = napari.view_image(data)\r\n viewer.dims.ndisplay = 3\r\n viewer.dims.ndisplay = 2\r\n```\r\nleads to an error deep in vispy that culminates in:\r\n```\r\nlib/python3.7/site-packages/vispy/gloo/glir.py in _validate(self)\r\n 1299 gl.GL_VALIDATE_STATUS):\r\n 1300 raise RuntimeError('Program validation error:\\n%s'\r\n-> 1301 % gl.glGetProgramInfoLog(self._handle))\r\n 1302 \r\n 1303 def _post_draw(self):\r\n\r\nRuntimeError: Program validation error:\r\n```\r\nobservations:\r\n- only happens with python 3.7; python 3.6 & 3.8 are fine\r\n- only happens with pyqt5, not pyside2\r\n- only happens with the **double** viewer instantiation (two calls to `view_image`), assigning both viewers to the same variable name. If you name the second viewer `viewer2`, it's fine\r\n- only happens when you switch from `ndisplay=` 2, then to 3, back to 2\r\n- the error occurs only once per session. running the same code a second time works fine.\r\n- pytest is key. if you comment out the `import pytest` line, you don't get the error...\r\n- i thought it might have something to do with our adding of 'viewer' to console globals... but removing the `self.console = QtConsole({'viewer': self.viewer})` from our `QtConsole` instantiation doesn't change anything.\r\n\n", "before_files": [{"content": "from ._version import get_versions\n\n__version__ = get_versions()['version']\ndel get_versions\n\nimport os\nfrom distutils.version import StrictVersion\nfrom pathlib import Path\nfrom qtpy import API_NAME\nfrom ._version import get_versions\n\n# putting up higher due to circular imports if plugin exceptions are raised\n# on startup (we need to be able to show the napari version in the traceback.)\n__version__ = get_versions()['version']\ndel get_versions\n\n\nif API_NAME == 'PySide2':\n # Set plugin path appropriately if using PySide2. 
This is a bug fix\n # for when both PyQt5 and Pyside2 are installed\n import PySide2\n\n os.environ['QT_PLUGIN_PATH'] = str(\n Path(PySide2.__file__).parent / 'Qt' / 'plugins'\n )\n\nfrom qtpy import QtCore\n\n# When QT is not the specific version, we raise a warning:\nfrom warnings import warn\n\nif StrictVersion(QtCore.__version__) < StrictVersion('5.12.3'):\n warn_message = f\"\"\"\n napari was tested with QT library `>=5.12.3`.\n The version installed is {QtCore.__version__}. Please report any issues with this\n specific QT version at https://github.com/Napari/napari/issues.\n \"\"\"\n warn(message=warn_message)\n\nfrom vispy import app\nimport logging\n\n# set vispy application to the appropriate qt backend\napp.use_app(API_NAME)\ndel app\n# set vispy logger to show warning and errors only\nvispy_logger = logging.getLogger('vispy')\nvispy_logger.setLevel(logging.WARNING)\n\nfrom .viewer import Viewer\nfrom .plugins.io import save_layers\n\n# Note that importing _viewer_key_bindings is needed as the Viewer gets\n# decorated with keybindings during that process, but it is not directly needed\n# by our users and so is deleted below\nfrom . import _viewer_key_bindings # noqa: F401\nfrom .view_layers import (\n view_path,\n view_image,\n view_labels,\n view_surface,\n view_shapes,\n view_points,\n view_vectors,\n)\nfrom ._qt import gui_qt\nfrom .utils import sys_info, _magicgui\n\n# register napari object types with magicgui if it is installed\n_magicgui.register_types_with_magicgui()\n\n\n# this unused import is here to fix a very strange bug.\n# there is some mysterious magical goodness in scipy stats that needs\n# to be imported early.\n# see: https://github.com/napari/napari/issues/925\nfrom scipy import stats # noqa: F401\n\ndel _magicgui\ndel stats\ndel _viewer_key_bindings\n", "path": "napari/__init__.py"}]} | 1,802 | 162 |
gh_patches_debug_5727 | rasdani/github-patches | git_diff | secdev__scapy-1040 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problems while importing scapy on some environments (e.g. Eclipse, some peculiarly configured Windows boxes etc.)
Scapy Version: v2.3.3-1008
System: Windows10
Python Version: 2.7.14
Updated (closed) issue: [#1029](https://github.com/secdev/scapy/issues/1029l)
File: scapy/\_\_init\_\_.py
There are problems using master/scapy in some environments, e.g. Eclipse, some peculiarly configured (mostly old) Windows boxes, etc.
When scapy is started/imported and git is installed, a console window appears while _version() is performed.
This issue also slows down scapy's launch/import.
The problem (unexpected launching of git, a git bash console window appearing) exists because the
condition in _version_from_git_describe() does not work as expected on my/our boxes (old Windows? old buggy Python 2.7.8 where the '/' path separator is not treated in the same way as '\\'? nevermind...)
Simple fix below fully avoids our problems:
```
diff --git a/scapy/__init__.py b/scapy/__init__.py
index 4893a9f..4f38578 100644
--- a/scapy/__init__.py
+++ b/scapy/__init__.py
@@ -38,7 +38,7 @@ def _version_from_git_describe():
>>> _version_from_git_describe()
'2.3.2.dev346'
"""
- if not os.path.isdir(os.path.join(_SCAPY_PKG_DIR, '../.git')):
+ if not os.path.isdir(os.path.join(os.path.dirname(_SCAPY_PKG_DIR), '.git')):
raise ValueError('not in scapy git repo')
p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,
```
Thanks,
Adam Karpierz
</issue>
<code>
[start of scapy/__init__.py]
1 ## This file is part of Scapy
2 ## See http://www.secdev.org/projects/scapy for more informations
3 ## Copyright (C) Philippe Biondi <[email protected]>
4 ## This program is published under a GPLv2 license
5
6 """
7 Scapy: create, send, sniff, dissect and manipulate network packets.
8
9 Usable either from an interactive console or as a Python library.
10 http://www.secdev.org/projects/scapy
11 """
12
13 import os
14 import re
15 import subprocess
16
17
18 _SCAPY_PKG_DIR = os.path.dirname(__file__)
19
20 def _version_from_git_describe():
21 """
22 Read the version from ``git describe``. It returns the latest tag with an
23 optional suffix if the current directory is not exactly on the tag.
24
25 Example::
26
27 $ git describe --always
28 v2.3.2-346-g164a52c075c8
29
30 The tag prefix (``v``) and the git commit sha1 (``-g164a52c075c8``) are
31 removed if present.
32
33 If the current directory is not exactly on the tag, a ``.devN`` suffix is
34 appended where N is the number of commits made after the last tag.
35
36 Example::
37
38 >>> _version_from_git_describe()
39 '2.3.2.dev346'
40 """
41 if not os.path.isdir(os.path.join(_SCAPY_PKG_DIR, '../.git')):
42 raise ValueError('not in scapy git repo')
43
44 p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,
45 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
46
47 out, err = p.communicate()
48
49 if p.returncode == 0:
50 tag = out.decode().strip()
51 match = re.match('^v?(.+?)-(\\d+)-g[a-f0-9]+$', tag)
52 if match:
53 # remove the 'v' prefix and add a '.devN' suffix
54 return '%s.dev%s' % (match.group(1), match.group(2))
55 else:
56 # just remove the 'v' prefix
57 return re.sub('^v', '', tag)
58 else:
59 raise subprocess.CalledProcessError(p.returncode, err)
60
61 def _version():
62 version_file = os.path.join(_SCAPY_PKG_DIR, 'VERSION')
63 try:
64 tag = _version_from_git_describe()
65 # successfully read the tag from git, write it in VERSION for
66 # installation and/or archive generation.
67 with open(version_file, 'w') as f:
68 f.write(tag)
69 return tag
70 except:
71 # failed to read the tag from git, try to read it from a VERSION file
72 try:
73 with open(version_file, 'r') as f:
74 tag = f.read()
75 return tag
76 except:
77 # Rely on git archive "export-subst" git attribute.
78 # See 'man gitattributes' for more details.
79 git_archive_id = '$Format:%h %d$'
80 sha1 = git_archive_id.strip().split()[0]
81 match = re.search('tag:(\\S+)', git_archive_id)
82 if match:
83 return "git-archive.dev" + match.group(1)
84 elif sha1:
85 return "git-archive.dev" + sha1
86 else:
87 return 'unknown.version'
88
89 VERSION = _version()
90
91 if __name__ == "__main__":
92 from scapy.main import interact
93 interact()
94
[end of scapy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scapy/__init__.py b/scapy/__init__.py
--- a/scapy/__init__.py
+++ b/scapy/__init__.py
@@ -38,7 +38,7 @@
>>> _version_from_git_describe()
'2.3.2.dev346'
"""
- if not os.path.isdir(os.path.join(_SCAPY_PKG_DIR, '../.git')):
+ if not os.path.isdir(os.path.join(os.path.dirname(_SCAPY_PKG_DIR), '.git')):
raise ValueError('not in scapy git repo')
p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,
| {"golden_diff": "diff --git a/scapy/__init__.py b/scapy/__init__.py\n--- a/scapy/__init__.py\n+++ b/scapy/__init__.py\n@@ -38,7 +38,7 @@\n >>> _version_from_git_describe()\n '2.3.2.dev346'\n \"\"\"\n- if not os.path.isdir(os.path.join(_SCAPY_PKG_DIR, '../.git')):\n+ if not os.path.isdir(os.path.join(os.path.dirname(_SCAPY_PKG_DIR), '.git')):\n raise ValueError('not in scapy git repo')\n \n p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,\n", "issue": "Problems while importing scapy on some environments (e.g. Eclipse, some peculiarly configured Windows boxes etc.) \nScapy Version: v2.3.3-1008\r\nSystem: Windows10\r\nPython Version: 2.7.14\r\n\r\nUpdated (closed) issue: [#1029](https://github.com/secdev/scapy/issues/1029l)\r\n\r\nFile: scapy/\\_\\_init\\_\\_.py\r\n\r\nThere are a problems of using master/scapy on some environments like e.g. Eclipse, some peculiarly configured Windows - mostly old - boxes etc.)\r\nWhen scapy is started/imported and git is installed, some console windows appears while _version() is performed.\r\nAlso this issue has impact on decrease of scapy launch/import speed.\r\n\r\nProblem (unexpected lauching of git, git bash console window appearing) exist because of\r\ncondition in _version_from_git_describe() does not works as expected on my/our boxes (old Windows?, old buggy Python 2.7.8 where '/' path separator is not treated in the same way as '\\\\'? nevermind...)\r\n\r\nSimple fix below fully avoids our problems:\r\n\r\n```\r\ndiff --git a/scapy/__init__.py b/scapy/__init__.py\r\nindex 4893a9f..4f38578 100644\r\n--- a/scapy/__init__.py\r\n+++ b/scapy/__init__.py\r\n@@ -38,7 +38,7 @@ def _version_from_git_describe():\r\n >>> _version_from_git_describe()\r\n '2.3.2.dev346'\r\n \"\"\"\r\n- if not os.path.isdir(os.path.join(_SCAPY_PKG_DIR, '../.git')):\r\n+ if not os.path.isdir(os.path.join(os.path.dirname(_SCAPY_PKG_DIR), '.git')):\r\n raise ValueError('not in scapy git repo')\r\n \r\n p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,\r\n```\r\n\r\nThanks,\r\nAdam Karpierz\r\n\n", "before_files": [{"content": "## This file is part of Scapy\n## See http://www.secdev.org/projects/scapy for more informations\n## Copyright (C) Philippe Biondi <[email protected]>\n## This program is published under a GPLv2 license\n\n\"\"\"\nScapy: create, send, sniff, dissect and manipulate network packets.\n\nUsable either from an interactive console or as a Python library.\nhttp://www.secdev.org/projects/scapy\n\"\"\"\n\nimport os\nimport re\nimport subprocess\n\n\n_SCAPY_PKG_DIR = os.path.dirname(__file__)\n\ndef _version_from_git_describe():\n \"\"\"\n Read the version from ``git describe``. 
It returns the latest tag with an\n optional suffix if the current directory is not exactly on the tag.\n\n Example::\n\n $ git describe --always\n v2.3.2-346-g164a52c075c8\n\n The tag prefix (``v``) and the git commit sha1 (``-g164a52c075c8``) are\n removed if present.\n\n If the current directory is not exactly on the tag, a ``.devN`` suffix is\n appended where N is the number of commits made after the last tag.\n\n Example::\n\n >>> _version_from_git_describe()\n '2.3.2.dev346'\n \"\"\"\n if not os.path.isdir(os.path.join(_SCAPY_PKG_DIR, '../.git')):\n raise ValueError('not in scapy git repo')\n\n p = subprocess.Popen(['git', 'describe', '--always'], cwd=_SCAPY_PKG_DIR,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n out, err = p.communicate()\n\n if p.returncode == 0:\n tag = out.decode().strip()\n match = re.match('^v?(.+?)-(\\\\d+)-g[a-f0-9]+$', tag)\n if match:\n # remove the 'v' prefix and add a '.devN' suffix\n return '%s.dev%s' % (match.group(1), match.group(2))\n else:\n # just remove the 'v' prefix\n return re.sub('^v', '', tag)\n else:\n raise subprocess.CalledProcessError(p.returncode, err)\n\ndef _version():\n version_file = os.path.join(_SCAPY_PKG_DIR, 'VERSION')\n try:\n tag = _version_from_git_describe()\n # successfully read the tag from git, write it in VERSION for\n # installation and/or archive generation.\n with open(version_file, 'w') as f:\n f.write(tag)\n return tag\n except:\n # failed to read the tag from git, try to read it from a VERSION file\n try:\n with open(version_file, 'r') as f:\n tag = f.read()\n return tag\n except:\n # Rely on git archive \"export-subst\" git attribute.\n # See 'man gitattributes' for more details.\n git_archive_id = '$Format:%h %d$'\n sha1 = git_archive_id.strip().split()[0]\n match = re.search('tag:(\\\\S+)', git_archive_id)\n if match:\n return \"git-archive.dev\" + match.group(1)\n elif sha1:\n return \"git-archive.dev\" + sha1\n else:\n return 'unknown.version'\n\nVERSION = _version()\n\nif __name__ == \"__main__\":\n from scapy.main import interact\n interact()\n", "path": "scapy/__init__.py"}]} | 1,923 | 150 |
gh_patches_debug_10190 | rasdani/github-patches | git_diff | kivy__python-for-android-1112 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Websocket error: SSL not available
Getting this error when attempting to open a secure websocket ('wss://') using websocket-client.
09-02 09:12:04.037 10328 10719 I python : [ERROR ] [MyApp] Websocket error: SSL not available.
09-02 09:12:04.037 10328 10719 I python : [INFO ] [MyApp] Websocket closed
Still happens after trying:
* buildozer android clean
* deleting the .buildozer directory and re-building
* issuing `buildozer android update` and `buildozer android clean` and update
buildozer requirements:
requirements = kivy==1.10.0, sqlite3, openssl, pyopenssl, pygments, websocket-client
Other notes:
* To get websocket-client to load I had to copy websocket-client source locally into my project as recommended here: https://github.com/kivy/python-for-android/blob/master/pythonforandroid/recipes/websocket-client/__init__.py#L3
* I disabled the SSL check on websocket-client and found a lower level exception:
09-02 10:00:43.215 13478 13511 I python : [ERROR ] [MyApp] Websocket error: _ssl.c:331: No root certificates specified for verification of other-side certificates.
* SSL does work on my android build; elsewhere in our app we are successfully using https with urllib2.
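For reference, the SSL-check workaround mentioned in the notes above looks roughly like this (assuming websocket-client's `WebSocketApp` API; disabling verification is only appropriate for debugging):
```python
import ssl
import websocket

ws = websocket.WebSocketApp("wss://example.com/socket")
# Skip certificate verification, since no root certificates are
# available on the device for verifying the other side.
ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE})
```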
</issue>
<code>
[start of pythonforandroid/recipes/websocket-client/__init__.py]
1 from pythonforandroid.toolchain import Recipe
2
3 # if android app crashes on start with "ImportError: No module named websocket"
4 #
5 # copy the 'websocket' directory into your app directory to force inclusion.
6 #
7 # see my example at https://github.com/debauchery1st/example_kivy_websocket-recipe
8
9
10 class WebSocketClient(Recipe):
11
12 url = 'https://github.com/debauchery1st/websocket-client/raw/master/websocket_client-0.40.0.tar.gz'
13
14 version = '0.40.0'
15 # md5sum = 'f1cf4cc7869ef97a98e5f4be25c30986'
16
17 # patches = ['websocket.patch'] # Paths relative to the recipe dir
18
19 depends = ['kivy', 'python2', 'android', 'pyjnius',
20 'cryptography', 'pyasn1', 'pyopenssl']
21
22 recipe = WebSocketClient()
23
[end of pythonforandroid/recipes/websocket-client/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pythonforandroid/recipes/websocket-client/__init__.py b/pythonforandroid/recipes/websocket-client/__init__.py
--- a/pythonforandroid/recipes/websocket-client/__init__.py
+++ b/pythonforandroid/recipes/websocket-client/__init__.py
@@ -5,6 +5,12 @@
# copy the 'websocket' directory into your app directory to force inclusion.
#
# see my example at https://github.com/debauchery1st/example_kivy_websocket-recipe
+#
+# If you see errors relating to 'SSL not available' ensure you have the package backports.ssl-match-hostname
+# in the buildozer requirements, since Kivy targets python 2.7.x
+#
+# You may also need sslopt={"cert_reqs": ssl.CERT_NONE} as a parameter to ws.run_forever() if you get an error relating to
+# host verification
class WebSocketClient(Recipe):
| {"golden_diff": "diff --git a/pythonforandroid/recipes/websocket-client/__init__.py b/pythonforandroid/recipes/websocket-client/__init__.py\n--- a/pythonforandroid/recipes/websocket-client/__init__.py\n+++ b/pythonforandroid/recipes/websocket-client/__init__.py\n@@ -5,6 +5,12 @@\n # copy the 'websocket' directory into your app directory to force inclusion.\n #\n # see my example at https://github.com/debauchery1st/example_kivy_websocket-recipe\n+#\n+# If you see errors relating to 'SSL not available' ensure you have the package backports.ssl-match-hostname\n+# in the buildozer requirements, since Kivy targets python 2.7.x\n+#\n+# You may also need sslopt={\"cert_reqs\": ssl.CERT_NONE} as a parameter to ws.run_forever() if you get an error relating to\n+# host verification\n \n \n class WebSocketClient(Recipe):\n", "issue": "Websocket error: SSL not available \nGetting this error when attempting to open a secure websocket ('wss://') using websocket-client.\r\n\r\n09-02 09:12:04.037 10328 10719 I python : [ERROR ] [MyApp] Websocket error: SSL not available.\r\n09-02 09:12:04.037 10328 10719 I python : [INFO ] [MyApp] Websocket closed\r\n\r\nStill happens after trying: \r\n* buildozer android clean \r\n* deleting the .buildozer directory and re-building\r\n* issuing `buildozer android update` and `buildozer android clean` and update\r\n\r\nbuildozer requirements:\r\n\r\nrequirements = kivy==1.10.0, sqlite3, openssl, pyopenssl, pygments, websocket-client\r\n\r\nOther notes:\r\n\r\n* To get websocket-client to load I had to copy websocket-client source locally into my project as recommended here: https://github.com/kivy/python-for-android/blob/master/pythonforandroid/recipes/websocket-client/__init__.py#L3\r\n\r\n* I disabled the SSL check on websocket-client and found a lower level exception:\r\n\r\n09-02 10:00:43.215 13478 13511 I python : [ERROR ] [MyApp] Websocket error: _ssl.c:331: No root certificates specified for verification of other-side certificates.\r\n\r\n* SSL does work on my android build; elsewhere in our app we are successfully using https with urllib2. \r\n\r\n\r\n\n", "before_files": [{"content": "from pythonforandroid.toolchain import Recipe\n\n# if android app crashes on start with \"ImportError: No module named websocket\"\n#\n# copy the 'websocket' directory into your app directory to force inclusion.\n#\n# see my example at https://github.com/debauchery1st/example_kivy_websocket-recipe\n\n\nclass WebSocketClient(Recipe):\n\n url = 'https://github.com/debauchery1st/websocket-client/raw/master/websocket_client-0.40.0.tar.gz'\n\n version = '0.40.0'\n # md5sum = 'f1cf4cc7869ef97a98e5f4be25c30986'\n\n # patches = ['websocket.patch'] # Paths relative to the recipe dir\n\n depends = ['kivy', 'python2', 'android', 'pyjnius',\n 'cryptography', 'pyasn1', 'pyopenssl']\n\nrecipe = WebSocketClient()\n", "path": "pythonforandroid/recipes/websocket-client/__init__.py"}]} | 1,154 | 205 |
gh_patches_debug_617 | rasdani/github-patches | git_diff | pex-tool__pex-1618 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.67
On the docket:
+ [x] Expand --platform syntax: support full versions. #1614
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.66"
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.66"
+__version__ = "2.1.67"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.66\"\n+__version__ = \"2.1.67\"\n", "issue": "Release 2.1.67\nOn the docket:\r\n+ [x] Expand --platform syntax: support full versions. #1614 \n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.66\"\n", "path": "pex/version.py"}]} | 617 | 97 |
gh_patches_debug_22356 | rasdani/github-patches | git_diff | scrapy__scrapy-4375 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"DeprecationWarning: Use of 'SCRAPY_'-prefixed environment variables" always shows
`ScrapyDeprecationWarning: Use of 'SCRAPY_'-prefixed environment variables to override settings is deprecated.`
This deprecation warning can't be shut up inside a project.
https://github.com/scrapy/scrapy/blob/034e2c31c7d55333c3de208f80dcee1bf45ef9b9/scrapy/utils/project.py#L17-L19
This is always true, if inside a project. Because of
https://github.com/scrapy/scrapy/blob/034e2c31c7d55333c3de208f80dcee1bf45ef9b9/scrapy/utils/conf.py#L77-L84
We should probably stop "emulating" ENV vars there, and pass this somehow differently.
Additionally, the `ScrapyDeprecationWarning` for `env_overrides` is too generic: It never tells you _which_ variable(s) are actually set. This is plain annoying. Please make it more specific.
</issue>
<code>
[start of scrapy/utils/project.py]
1 import os
2 import pickle
3 import warnings
4
5 from importlib import import_module
6 from os.path import join, dirname, abspath, isabs, exists
7
8 from scrapy.utils.conf import closest_scrapy_cfg, get_config, init_env
9 from scrapy.settings import Settings
10 from scrapy.exceptions import NotConfigured, ScrapyDeprecationWarning
11
12
13 ENVVAR = 'SCRAPY_SETTINGS_MODULE'
14 DATADIR_CFG_SECTION = 'datadir'
15
16
17 def inside_project():
18 scrapy_module = os.environ.get('SCRAPY_SETTINGS_MODULE')
19 if scrapy_module is not None:
20 try:
21 import_module(scrapy_module)
22 except ImportError as exc:
23 warnings.warn("Cannot import scrapy settings module %s: %s" % (scrapy_module, exc))
24 else:
25 return True
26 return bool(closest_scrapy_cfg())
27
28
29 def project_data_dir(project='default'):
30 """Return the current project data dir, creating it if it doesn't exist"""
31 if not inside_project():
32 raise NotConfigured("Not inside a project")
33 cfg = get_config()
34 if cfg.has_option(DATADIR_CFG_SECTION, project):
35 d = cfg.get(DATADIR_CFG_SECTION, project)
36 else:
37 scrapy_cfg = closest_scrapy_cfg()
38 if not scrapy_cfg:
39 raise NotConfigured("Unable to find scrapy.cfg file to infer project data dir")
40 d = abspath(join(dirname(scrapy_cfg), '.scrapy'))
41 if not exists(d):
42 os.makedirs(d)
43 return d
44
45
46 def data_path(path, createdir=False):
47 """
48 Return the given path joined with the .scrapy data directory.
49 If given an absolute path, return it unmodified.
50 """
51 if not isabs(path):
52 if inside_project():
53 path = join(project_data_dir(), path)
54 else:
55 path = join('.scrapy', path)
56 if createdir and not exists(path):
57 os.makedirs(path)
58 return path
59
60
61 def get_project_settings():
62 if ENVVAR not in os.environ:
63 project = os.environ.get('SCRAPY_PROJECT', 'default')
64 init_env(project)
65
66 settings = Settings()
67 settings_module_path = os.environ.get(ENVVAR)
68 if settings_module_path:
69 settings.setmodule(settings_module_path, priority='project')
70
71 pickled_settings = os.environ.get("SCRAPY_PICKLED_SETTINGS_TO_OVERRIDE")
72 if pickled_settings:
73 warnings.warn("Use of environment variable "
74 "'SCRAPY_PICKLED_SETTINGS_TO_OVERRIDE' "
75 "is deprecated.", ScrapyDeprecationWarning)
76 settings.setdict(pickle.loads(pickled_settings), priority='project')
77
78 env_overrides = {k[7:]: v for k, v in os.environ.items() if
79 k.startswith('SCRAPY_')}
80 if env_overrides:
81 warnings.warn("Use of 'SCRAPY_'-prefixed environment variables to override settings is deprecated.", ScrapyDeprecationWarning)
82 settings.setdict(env_overrides, priority='project')
83 return settings
84
[end of scrapy/utils/project.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/utils/project.py b/scrapy/utils/project.py
--- a/scrapy/utils/project.py
+++ b/scrapy/utils/project.py
@@ -75,9 +75,24 @@
"is deprecated.", ScrapyDeprecationWarning)
settings.setdict(pickle.loads(pickled_settings), priority='project')
- env_overrides = {k[7:]: v for k, v in os.environ.items() if
- k.startswith('SCRAPY_')}
- if env_overrides:
- warnings.warn("Use of 'SCRAPY_'-prefixed environment variables to override settings is deprecated.", ScrapyDeprecationWarning)
- settings.setdict(env_overrides, priority='project')
+ scrapy_envvars = {k[7:]: v for k, v in os.environ.items() if
+ k.startswith('SCRAPY_')}
+ valid_envvars = {
+ 'CHECK',
+ 'PICKLED_SETTINGS_TO_OVERRIDE',
+ 'PROJECT',
+ 'PYTHON_SHELL',
+ 'SETTINGS_MODULE',
+ }
+ setting_envvars = {k for k in scrapy_envvars if k not in valid_envvars}
+ if setting_envvars:
+ setting_envvar_list = ', '.join(sorted(setting_envvars))
+ warnings.warn(
+ 'Use of environment variables prefixed with SCRAPY_ to override '
+ 'settings is deprecated. The following environment variables are '
+ 'currently defined: {}'.format(setting_envvar_list),
+ ScrapyDeprecationWarning
+ )
+ settings.setdict(scrapy_envvars, priority='project')
+
return settings
| {"golden_diff": "diff --git a/scrapy/utils/project.py b/scrapy/utils/project.py\n--- a/scrapy/utils/project.py\n+++ b/scrapy/utils/project.py\n@@ -75,9 +75,24 @@\n         \"is deprecated.\", ScrapyDeprecationWarning)\n         settings.setdict(pickle.loads(pickled_settings), priority='project')\n \n-    env_overrides = {k[7:]: v for k, v in os.environ.items() if\n-                     k.startswith('SCRAPY_')}\n-    if env_overrides:\n-        warnings.warn(\"Use of 'SCRAPY_'-prefixed environment variables to override settings is deprecated.\", ScrapyDeprecationWarning)\n-        settings.setdict(env_overrides, priority='project')\n+    scrapy_envvars = {k[7:]: v for k, v in os.environ.items() if\n+                      k.startswith('SCRAPY_')}\n+    valid_envvars = {\n+        'CHECK',\n+        'PICKLED_SETTINGS_TO_OVERRIDE',\n+        'PROJECT',\n+        'PYTHON_SHELL',\n+        'SETTINGS_MODULE',\n+    }\n+    setting_envvars = {k for k in scrapy_envvars if k not in valid_envvars}\n+    if setting_envvars:\n+        setting_envvar_list = ', '.join(sorted(setting_envvars))\n+        warnings.warn(\n+            'Use of environment variables prefixed with SCRAPY_ to override '\n+            'settings is deprecated. The following environment variables are '\n+            'currently defined: {}'.format(setting_envvar_list),\n+            ScrapyDeprecationWarning\n+        )\n+    settings.setdict(scrapy_envvars, priority='project')\n+\n     return settings\n", "issue": "\"DeprecationWarning: Use of 'SCRAPY_'-prefixed environment variables\" always shows\n`ScrapyDeprecationWarning: Use of 'SCRAPY_'-prefixed environment variables to override settings is deprecated.`\r\n\r\nThis deprecation warning can't be shut up inside a project.\r\n\r\nhttps://github.com/scrapy/scrapy/blob/034e2c31c7d55333c3de208f80dcee1bf45ef9b9/scrapy/utils/project.py#L17-L19\r\n\r\nThis is always true, if inside a project. Because of\r\nhttps://github.com/scrapy/scrapy/blob/034e2c31c7d55333c3de208f80dcee1bf45ef9b9/scrapy/utils/conf.py#L77-L84\r\nWe should probably stop \"emulating\" ENV vars there, and pass this somehow differently.\r\n\r\nAdditionally, the `ScrapyDeprecationWarning` for `env_overrides` is too generic: It never tells you _which_ variable(s) are actually set. This is plain annoying. Please make it more specific.\n", "before_files": [{"content": "import os\nimport pickle\nimport warnings\n\nfrom importlib import import_module\nfrom os.path import join, dirname, abspath, isabs, exists\n\nfrom scrapy.utils.conf import closest_scrapy_cfg, get_config, init_env\nfrom scrapy.settings import Settings\nfrom scrapy.exceptions import NotConfigured, ScrapyDeprecationWarning\n\n\nENVVAR = 'SCRAPY_SETTINGS_MODULE'\nDATADIR_CFG_SECTION = 'datadir'\n\n\ndef inside_project():\n    scrapy_module = os.environ.get('SCRAPY_SETTINGS_MODULE')\n    if scrapy_module is not None:\n        try:\n            import_module(scrapy_module)\n        except ImportError as exc:\n            warnings.warn(\"Cannot import scrapy settings module %s: %s\" % (scrapy_module, exc))\n        else:\n            return True\n    return bool(closest_scrapy_cfg())\n\n\ndef project_data_dir(project='default'):\n    \"\"\"Return the current project data dir, creating it if it doesn't exist\"\"\"\n    if not inside_project():\n        raise NotConfigured(\"Not inside a project\")\n    cfg = get_config()\n    if cfg.has_option(DATADIR_CFG_SECTION, project):\n        d = cfg.get(DATADIR_CFG_SECTION, project)\n    else:\n        scrapy_cfg = closest_scrapy_cfg()\n        if not scrapy_cfg:\n            raise NotConfigured(\"Unable to find scrapy.cfg file to infer project data dir\")\n        d = abspath(join(dirname(scrapy_cfg), '.scrapy'))\n    if not exists(d):\n        os.makedirs(d)\n    return d\n\n\ndef data_path(path, createdir=False):\n    \"\"\"\n    Return the given path joined with the .scrapy data directory.\n    If given an absolute path, return it unmodified.\n    \"\"\"\n    if not isabs(path):\n        if inside_project():\n            path = join(project_data_dir(), path)\n        else:\n            path = join('.scrapy', path)\n    if createdir and not exists(path):\n        os.makedirs(path)\n    return path\n\n\ndef get_project_settings():\n    if ENVVAR not in os.environ:\n        project = os.environ.get('SCRAPY_PROJECT', 'default')\n        init_env(project)\n\n    settings = Settings()\n    settings_module_path = os.environ.get(ENVVAR)\n    if settings_module_path:\n        settings.setmodule(settings_module_path, priority='project')\n\n    pickled_settings = os.environ.get(\"SCRAPY_PICKLED_SETTINGS_TO_OVERRIDE\")\n    if pickled_settings:\n        warnings.warn(\"Use of environment variable \"\n                      \"'SCRAPY_PICKLED_SETTINGS_TO_OVERRIDE' \"\n                      \"is deprecated.\", ScrapyDeprecationWarning)\n        settings.setdict(pickle.loads(pickled_settings), priority='project')\n\n    env_overrides = {k[7:]: v for k, v in os.environ.items() if\n                     k.startswith('SCRAPY_')}\n    if env_overrides:\n        warnings.warn(\"Use of 'SCRAPY_'-prefixed environment variables to override settings is deprecated.\", ScrapyDeprecationWarning)\n        settings.setdict(env_overrides, priority='project')\n    return settings\n", "path": "scrapy/utils/project.py"}]} | 1,592 | 358
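Editor's note: the fix above replaces one blanket deprecation warning with a message that names the offending variables. Below is a standalone sketch of that filtering step using only the standard library; the valid-name set mirrors the diff, and the warning class is simplified to the default `UserWarning`.

```python
# Sketch of the variable-specific warning from the fix. len("SCRAPY_") == 7,
# hence the k[7:] slice; VALID_ENVVARS mirrors the set added in the diff.
import os
import warnings

VALID_ENVVARS = {
    "CHECK",
    "PICKLED_SETTINGS_TO_OVERRIDE",
    "PROJECT",
    "PYTHON_SHELL",
    "SETTINGS_MODULE",
}


def scrapy_env_overrides(environ=os.environ):
    """Collect SCRAPY_-prefixed overrides and name each one in the warning."""
    overrides = {k[7:]: v for k, v in environ.items() if k.startswith("SCRAPY_")}
    offenders = sorted(k for k in overrides if k not in VALID_ENVVARS)
    if offenders:
        warnings.warn(
            "Use of environment variables prefixed with SCRAPY_ to override "
            "settings is deprecated. Currently defined: " + ", ".join(offenders)
        )
    return overrides


if __name__ == "__main__":
    os.environ["SCRAPY_DOWNLOAD_DELAY"] = "2"  # example offender
    print(scrapy_env_overrides())
```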
gh_patches_debug_17901 | rasdani/github-patches | git_diff | openstates__openstates-scrapers-386 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NY: Native American Relations committee is not being scraped correctly
http://www.nysenate.gov/committee/state-native-american-relations
http://openstates.org/admin/object_json/committees/NYC000116/
</issue>
<code>
[start of openstates/ny/committees.py]
1 import re
2
3 from billy.scrape import NoDataForPeriod
4 from billy.scrape.committees import CommitteeScraper, Committee
5
6 import lxml.html
7
8
9 def parse_name(name):
10 """
11 Split a committee membership string into name and role.
12
13 >>> parse_name('Felix Ortiz')
14 ('Felix Ortiz', 'member')
15 >>> parse_name('Felix Ortiz (Chair)')
16 ('Felix Ortiz', 'chair')
17 >>> parse_name('Hon. Felix Ortiz, Co-Chair')
18 ('Felix Ortiz', 'co-chair')
19 >>> parse_name('Owen H.\\r\\nJohnson (Vice Chairperson)')
20 ('Owen H. Johnson', 'vice chairperson')
21 """
22 name = re.sub(r'^(Hon\.|Assemblyman|Assemblywoman)\s+', '', name)
23 name = re.sub(r'\s+', ' ', name)
24
25 roles = ["Chairwoman", "Chairperson", "Chair", "Secretary", "Treasurer",
26 "Parliamentarian", "Chaplain"]
27 match = re.match(
28 r'([^(]+),? \(?((Co|Vice)?-?\s*(%s))\)?' % '|'.join(roles),
29 name)
30
31 if match:
32 name = match.group(1).strip(' ,')
33 role = match.group(2).lower()
34 return (name, role)
35 return (name, 'member')
36
37
38 class NYCommitteeScraper(CommitteeScraper):
39 state = "ny"
40 latest_only = True
41
42 def scrape(self, chamber, term):
43 getattr(self, 'scrape_' + chamber)()
44
45 def scrape_lower(self, only_names=None):
46 committees = []
47 url = "http://assembly.state.ny.us/comm/"
48 page = self.urlopen(url)
49 page = lxml.html.fromstring(page)
50 page.make_links_absolute(url)
51
52 for link in page.xpath("//a[contains(@href, 'sec=mem')]"):
53 name = link.xpath("string(../strong)").strip()
54 if 'Caucus' in name:
55 continue
56
57 url = link.attrib['href']
58
59 committees.append(name)
60
61 self.scrape_lower_committee(name, url)
62 return committees
63
64 def scrape_lower_committee(self, name, url):
65 page = self.urlopen(url)
66 page = lxml.html.fromstring(page)
67
68 comm = Committee('lower', name)
69 comm.add_source(url)
70
71 for link in page.xpath("//div[@class='commlinks']//a[contains(@href, 'mem')]"):
72 member = link.text.strip()
73 member = re.sub(r'\s+', ' ', member)
74
75 name, role = parse_name(member)
76 comm.add_member(name, role)
77
78 self.save_committee(comm)
79
80 def scrape_upper(self):
81 committees = []
82 url = "http://www.nysenate.gov/committees"
83 page = self.urlopen(url)
84 page = lxml.html.fromstring(page)
85 page.make_links_absolute(url)
86
87 for link in page.xpath("//a[contains(@href, '/committee/')]"):
88 name = link.text.strip()
89
90 if name == 'New York State Conference of Black Senators':
91 # stop scraping once we reach the caucuses
92 break
93
94 committees.append(name)
95 self.scrape_upper_committee(name, link.attrib['href'])
96 return committees
97
98 def scrape_upper_committee(self, name, url):
99 page = self.urlopen(url)
100 page = lxml.html.fromstring(page)
101
102 comm = Committee('upper', name)
103 comm.add_source(url)
104
105 member_div = page.xpath("//div[@class = 'committee-members']")[0]
106
107 seen = set()
108 for link in member_div.xpath(".//a"):
109 if not link.text:
110 continue
111
112 member = link.text.strip()
113
114 next_elem = link.getnext()
115 if (next_elem is not None and
116 next_elem.tag == 'a' and
117 next_elem.attrib['href'] == link.attrib['href']):
118 # Sometimes NY is cool and splits names across a
119 # couple links
120 member = "%s %s" % (member, next_elem.text.strip())
121
122 member = re.sub(r'\s+', ' ', member)
123
124 if member in seen or not member:
125 continue
126 seen.add(member)
127
128 name, role = parse_name(member)
129 comm.add_member(name, role)
130
131 self.save_committee(comm)
132
[end of openstates/ny/committees.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openstates/ny/committees.py b/openstates/ny/committees.py
--- a/openstates/ny/committees.py
+++ b/openstates/ny/committees.py
@@ -84,15 +84,17 @@
page = lxml.html.fromstring(page)
page.make_links_absolute(url)
- for link in page.xpath("//a[contains(@href, '/committee/')]"):
- name = link.text.strip()
+ for h2 in page.xpath("//h2"):
+ committee_types = ['Standing Committees','Temporary Committees']
+ if h2.text not in committee_types:
+ continue
+
+ for link in h2.getparent().xpath(".//a[contains(@href, '/committee/')]"):
+ name = link.text.strip()
- if name == 'New York State Conference of Black Senators':
- # stop scraping once we reach the caucuses
- break
+ committees.append(name)
+ self.scrape_upper_committee(name, link.attrib['href'])
- committees.append(name)
- self.scrape_upper_committee(name, link.attrib['href'])
return committees
def scrape_upper_committee(self, name, url):
| {"golden_diff": "diff --git a/openstates/ny/committees.py b/openstates/ny/committees.py\n--- a/openstates/ny/committees.py\n+++ b/openstates/ny/committees.py\n@@ -84,15 +84,17 @@\n         page = lxml.html.fromstring(page)\n         page.make_links_absolute(url)\n \n-        for link in page.xpath(\"//a[contains(@href, '/committee/')]\"):\n-            name = link.text.strip()\n+        for h2 in page.xpath(\"//h2\"):\n+            committee_types = ['Standing Committees','Temporary Committees']\n+            if h2.text not in committee_types:\n+                continue\n+            \n+            for link in h2.getparent().xpath(\".//a[contains(@href, '/committee/')]\"):\n+                name = link.text.strip()\n \n-            if name == 'New York State Conference of Black Senators':\n-                # stop scraping once we reach the caucuses\n-                break\n+                committees.append(name)\n+                self.scrape_upper_committee(name, link.attrib['href'])\n \n-            committees.append(name)\n-            self.scrape_upper_committee(name, link.attrib['href'])\n         return committees\n \n     def scrape_upper_committee(self, name, url):\n", "issue": "NY: Native American Relations committee is not being scraped correctly\nhttp://www.nysenate.gov/committee/state-native-american-relations\nhttp://openstates.org/admin/object_json/committees/NYC000116/\n\n", "before_files": [{"content": "import re\n\nfrom billy.scrape import NoDataForPeriod\nfrom billy.scrape.committees import CommitteeScraper, Committee\n\nimport lxml.html\n\n\ndef parse_name(name):\n    \"\"\"\n    Split a committee membership string into name and role.\n\n    >>> parse_name('Felix Ortiz')\n    ('Felix Ortiz', 'member')\n    >>> parse_name('Felix Ortiz (Chair)')\n    ('Felix Ortiz', 'chair')\n    >>> parse_name('Hon. Felix Ortiz, Co-Chair')\n    ('Felix Ortiz', 'co-chair')\n    >>> parse_name('Owen H.\\\\r\\\\nJohnson (Vice Chairperson)')\n    ('Owen H. Johnson', 'vice chairperson')\n    \"\"\"\n    name = re.sub(r'^(Hon\\.|Assemblyman|Assemblywoman)\\s+', '', name)\n    name = re.sub(r'\\s+', ' ', name)\n\n    roles = [\"Chairwoman\", \"Chairperson\", \"Chair\", \"Secretary\", \"Treasurer\",\n             \"Parliamentarian\", \"Chaplain\"]\n    match = re.match(\n        r'([^(]+),? \\(?((Co|Vice)?-?\\s*(%s))\\)?' % '|'.join(roles),\n        name)\n\n    if match:\n        name = match.group(1).strip(' ,')\n        role = match.group(2).lower()\n        return (name, role)\n    return (name, 'member')\n\n\nclass NYCommitteeScraper(CommitteeScraper):\n    state = \"ny\"\n    latest_only = True\n\n    def scrape(self, chamber, term):\n        getattr(self, 'scrape_' + chamber)()\n\n    def scrape_lower(self, only_names=None):\n        committees = []\n        url = \"http://assembly.state.ny.us/comm/\"\n        page = self.urlopen(url)\n        page = lxml.html.fromstring(page)\n        page.make_links_absolute(url)\n\n        for link in page.xpath(\"//a[contains(@href, 'sec=mem')]\"):\n            name = link.xpath(\"string(../strong)\").strip()\n            if 'Caucus' in name:\n                continue\n\n            url = link.attrib['href']\n\n            committees.append(name)\n\n            self.scrape_lower_committee(name, url)\n        return committees\n\n    def scrape_lower_committee(self, name, url):\n        page = self.urlopen(url)\n        page = lxml.html.fromstring(page)\n\n        comm = Committee('lower', name)\n        comm.add_source(url)\n\n        for link in page.xpath(\"//div[@class='commlinks']//a[contains(@href, 'mem')]\"):\n            member = link.text.strip()\n            member = re.sub(r'\\s+', ' ', member)\n\n            name, role = parse_name(member)\n            comm.add_member(name, role)\n\n        self.save_committee(comm)\n\n    def scrape_upper(self):\n        committees = []\n        url = \"http://www.nysenate.gov/committees\"\n        page = self.urlopen(url)\n        page = lxml.html.fromstring(page)\n        page.make_links_absolute(url)\n\n        for link in page.xpath(\"//a[contains(@href, '/committee/')]\"):\n            name = link.text.strip()\n\n            if name == 'New York State Conference of Black Senators':\n                # stop scraping once we reach the caucuses\n                break\n\n            committees.append(name)\n            self.scrape_upper_committee(name, link.attrib['href'])\n        return committees\n\n    def scrape_upper_committee(self, name, url):\n        page = self.urlopen(url)\n        page = lxml.html.fromstring(page)\n\n        comm = Committee('upper', name)\n        comm.add_source(url)\n\n        member_div = page.xpath(\"//div[@class = 'committee-members']\")[0]\n\n        seen = set()\n        for link in member_div.xpath(\".//a\"):\n            if not link.text:\n                continue\n\n            member = link.text.strip()\n\n            next_elem = link.getnext()\n            if (next_elem is not None and\n                next_elem.tag == 'a' and\n                next_elem.attrib['href'] == link.attrib['href']):\n                # Sometimes NY is cool and splits names across a\n                # couple links\n                member = \"%s %s\" % (member, next_elem.text.strip())\n\n            member = re.sub(r'\\s+', ' ', member)\n\n            if member in seen or not member:\n                continue\n            seen.add(member)\n\n            name, role = parse_name(member)\n            comm.add_member(name, role)\n\n        self.save_committee(comm)\n", "path": "openstates/ny/committees.py"}]} | 1,834 | 265
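Editor's note: the fix stops relying on a sentinel committee name and instead scopes link extraction to the section headings. Below is a self-contained sketch of the same idea, run against an inline snippet so it needs no network access; the HTML is illustrative, not the real Senate page. Requires `lxml`.

```python
# Section-scoped scraping sketch: only walk links under the wanted <h2>
# headings, so caucus sections are skipped structurally.
import lxml.html

SAMPLE = """
<div><h2>Standing Committees</h2>
  <a href="/committee/agriculture">Agriculture</a>
  <a href="/committee/state-native-american-relations">State-Native American Relations</a>
</div>
<div><h2>Caucuses</h2>
  <a href="/committee/black-senators">Conference of Black Senators</a>
</div>
"""


def committee_links(html, wanted=("Standing Committees", "Temporary Committees")):
    page = lxml.html.fromstring(html)
    for h2 in page.xpath("//h2"):
        if h2.text not in wanted:
            continue  # skip caucuses and any other non-committee section
        for link in h2.getparent().xpath(".//a[contains(@href, '/committee/')]"):
            yield link.text.strip(), link.attrib["href"]


if __name__ == "__main__":
    for name, href in committee_links(SAMPLE):
        print(name, "->", href)
```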
gh_patches_debug_4705 | rasdani/github-patches | git_diff | ultrabug__py3status-1549 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
default config location
I have installed ubuntu 18.04 and it makes sense to put my `i3status.conf` in `~/.config/i3/` but this is not found by default. I think we should add this path.
</issue>
<code>
[start of py3status/cli.py]
1 import argparse
2 import os
3
4
5 def parse_cli():
6 """
7 Parse the command line arguments
8 """
9
10 # FIXME do we really want to do this here?
11
12 # get home path
13 home_path = os.path.expanduser("~")
14
15 # i3status config file default detection
16 # respect i3status' file detection order wrt issue #43
17 i3status_config_file_candidates = [
18 "{}/.i3status.conf".format(home_path),
19 "{}/i3status/config".format(
20 os.environ.get("XDG_CONFIG_HOME", "{}/.config".format(home_path))
21 ),
22 "/etc/i3status.conf",
23 "{}/i3status/config".format(os.environ.get("XDG_CONFIG_DIRS", "/etc/xdg")),
24 ]
25 for fn in i3status_config_file_candidates:
26 if os.path.isfile(fn):
27 i3status_config_file_default = fn
28 break
29 else:
30 # if none of the default files exists, we will default
31 # to ~/.i3/i3status.conf
32 i3status_config_file_default = "{}/.i3/i3status.conf".format(home_path)
33
34 # command line options
35 parser = argparse.ArgumentParser(
36 description="The agile, python-powered, i3status wrapper"
37 )
38 parser = argparse.ArgumentParser(add_help=True)
39 parser.add_argument(
40 "-b",
41 "--dbus-notify",
42 action="store_true",
43 default=False,
44 dest="dbus_notify",
45 help=(
46 "use notify-send to send user notifications "
47 "rather than i3-nagbar, "
48 "requires a notification daemon eg dunst"
49 ),
50 )
51 parser.add_argument(
52 "-c",
53 "--config",
54 action="store",
55 dest="i3status_conf",
56 type=str,
57 default=i3status_config_file_default,
58 help="path to i3status config file",
59 )
60 parser.add_argument(
61 "-d", "--debug", action="store_true", help="be verbose in syslog"
62 )
63 parser.add_argument(
64 "-g",
65 "--gevent",
66 action="store_true",
67 default=False,
68 dest="gevent",
69 help="enable gevent monkey patching (default False)",
70 )
71 parser.add_argument(
72 "-i",
73 "--include",
74 action="append",
75 dest="include_paths",
76 help=(
77 "include user-written modules from those "
78 "directories (default ~/.i3/py3status)"
79 ),
80 )
81 parser.add_argument(
82 "-l",
83 "--log-file",
84 action="store",
85 dest="log_file",
86 type=str,
87 default=None,
88 help="path to py3status log file",
89 )
90 parser.add_argument(
91 "-n",
92 "--interval",
93 action="store",
94 dest="interval",
95 type=float,
96 default=1,
97 help="update interval in seconds (default 1 sec)",
98 )
99 parser.add_argument(
100 "-s",
101 "--standalone",
102 action="store_true",
103 help="standalone mode, do not use i3status",
104 )
105 parser.add_argument(
106 "-t",
107 "--timeout",
108 action="store",
109 dest="cache_timeout",
110 type=int,
111 default=60,
112 help="default injection cache timeout in seconds (default 60 sec)",
113 )
114 parser.add_argument(
115 "-m",
116 "--disable-click-events",
117 action="store_true",
118 dest="disable_click_events",
119 default=False,
120 help="disable all click events",
121 )
122 parser.add_argument(
123 "-v", "--version", action="store_true", help="show py3status version and exit"
124 )
125 # FIXME we should make all of these explicit so they self document etc
126 parser.add_argument("cli_command", nargs="*", help=argparse.SUPPRESS)
127
128 options = parser.parse_args()
129
130 # only asked for version
131 if options.version:
132 import sys
133 from platform import python_version
134 from py3status.version import version
135
136 print("py3status version {} (python {})".format(version, python_version()))
137 sys.exit(0)
138
139 # all done
140 return options
141
[end of py3status/cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/py3status/cli.py b/py3status/cli.py
--- a/py3status/cli.py
+++ b/py3status/cli.py
@@ -19,6 +19,7 @@
"{}/i3status/config".format(
os.environ.get("XDG_CONFIG_HOME", "{}/.config".format(home_path))
),
+ "{}/.config/i3/".format(home_path),
"/etc/i3status.conf",
"{}/i3status/config".format(os.environ.get("XDG_CONFIG_DIRS", "/etc/xdg")),
]
| {"golden_diff": "diff --git a/py3status/cli.py b/py3status/cli.py\n--- a/py3status/cli.py\n+++ b/py3status/cli.py\n@@ -19,6 +19,7 @@\n         \"{}/i3status/config\".format(\n             os.environ.get(\"XDG_CONFIG_HOME\", \"{}/.config\".format(home_path))\n         ),\n+        \"{}/.config/i3/\".format(home_path),\n         \"/etc/i3status.conf\",\n         \"{}/i3status/config\".format(os.environ.get(\"XDG_CONFIG_DIRS\", \"/etc/xdg\")),\n     ]\n", "issue": "default config location\nI have installed ubuntu 18.04 and it makes sense to put my `i3status.conf` in `~/.config/i3/` but this is not found by default. I think we should add this path.\n", "before_files": [{"content": "import argparse\nimport os\n\n\ndef parse_cli():\n    \"\"\"\n    Parse the command line arguments\n    \"\"\"\n\n    # FIXME do we really want to do this here?\n\n    # get home path\n    home_path = os.path.expanduser(\"~\")\n\n    # i3status config file default detection\n    # respect i3status' file detection order wrt issue #43\n    i3status_config_file_candidates = [\n        \"{}/.i3status.conf\".format(home_path),\n        \"{}/i3status/config\".format(\n            os.environ.get(\"XDG_CONFIG_HOME\", \"{}/.config\".format(home_path))\n        ),\n        \"/etc/i3status.conf\",\n        \"{}/i3status/config\".format(os.environ.get(\"XDG_CONFIG_DIRS\", \"/etc/xdg\")),\n    ]\n    for fn in i3status_config_file_candidates:\n        if os.path.isfile(fn):\n            i3status_config_file_default = fn\n            break\n    else:\n        # if none of the default files exists, we will default\n        # to ~/.i3/i3status.conf\n        i3status_config_file_default = \"{}/.i3/i3status.conf\".format(home_path)\n\n    # command line options\n    parser = argparse.ArgumentParser(\n        description=\"The agile, python-powered, i3status wrapper\"\n    )\n    parser = argparse.ArgumentParser(add_help=True)\n    parser.add_argument(\n        \"-b\",\n        \"--dbus-notify\",\n        action=\"store_true\",\n        default=False,\n        dest=\"dbus_notify\",\n        help=(\n            \"use notify-send to send user notifications \"\n            \"rather than i3-nagbar, \"\n            \"requires a notification daemon eg dunst\"\n        ),\n    )\n    parser.add_argument(\n        \"-c\",\n        \"--config\",\n        action=\"store\",\n        dest=\"i3status_conf\",\n        type=str,\n        default=i3status_config_file_default,\n        help=\"path to i3status config file\",\n    )\n    parser.add_argument(\n        \"-d\", \"--debug\", action=\"store_true\", help=\"be verbose in syslog\"\n    )\n    parser.add_argument(\n        \"-g\",\n        \"--gevent\",\n        action=\"store_true\",\n        default=False,\n        dest=\"gevent\",\n        help=\"enable gevent monkey patching (default False)\",\n    )\n    parser.add_argument(\n        \"-i\",\n        \"--include\",\n        action=\"append\",\n        dest=\"include_paths\",\n        help=(\n            \"include user-written modules from those \"\n            \"directories (default ~/.i3/py3status)\"\n        ),\n    )\n    parser.add_argument(\n        \"-l\",\n        \"--log-file\",\n        action=\"store\",\n        dest=\"log_file\",\n        type=str,\n        default=None,\n        help=\"path to py3status log file\",\n    )\n    parser.add_argument(\n        \"-n\",\n        \"--interval\",\n        action=\"store\",\n        dest=\"interval\",\n        type=float,\n        default=1,\n        help=\"update interval in seconds (default 1 sec)\",\n    )\n    parser.add_argument(\n        \"-s\",\n        \"--standalone\",\n        action=\"store_true\",\n        help=\"standalone mode, do not use i3status\",\n    )\n    parser.add_argument(\n        \"-t\",\n        \"--timeout\",\n        action=\"store\",\n        dest=\"cache_timeout\",\n        type=int,\n        default=60,\n        help=\"default injection cache timeout in seconds (default 60 sec)\",\n    )\n    parser.add_argument(\n        \"-m\",\n        \"--disable-click-events\",\n        action=\"store_true\",\n        dest=\"disable_click_events\",\n        default=False,\n        help=\"disable all click events\",\n    )\n    parser.add_argument(\n        \"-v\", \"--version\", action=\"store_true\", help=\"show py3status version and exit\"\n    )\n    # FIXME we should make all of these explicit so they self document etc\n    parser.add_argument(\"cli_command\", nargs=\"*\", help=argparse.SUPPRESS)\n\n    options = parser.parse_args()\n\n    # only asked for version\n    if options.version:\n        import sys\n        from platform import python_version\n        from py3status.version import version\n\n        print(\"py3status version {} (python {})\".format(version, python_version()))\n        sys.exit(0)\n\n    # all done\n    return options\n", "path": "py3status/cli.py"}]} | 1,798 | 124
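Editor's note: the whole fix is one extra candidate path. Below is a minimal sketch of the detection loop with that entry included, standard library only; the trailing slash on the new entry is copied verbatim from the diff.

```python
# Candidate-scan sketch for locating an i3status config file.
import os


def i3status_config_candidates(home=None, environ=os.environ):
    home = home or os.path.expanduser("~")
    return [
        "{}/.i3status.conf".format(home),
        "{}/i3status/config".format(
            environ.get("XDG_CONFIG_HOME", "{}/.config".format(home))
        ),
        # entry added by the fix (a directory path, kept as in the diff)
        "{}/.config/i3/".format(home),
        "/etc/i3status.conf",
        "{}/i3status/config".format(environ.get("XDG_CONFIG_DIRS", "/etc/xdg")),
    ]


def find_config():
    for candidate in i3status_config_candidates():
        if os.path.isfile(candidate):
            return candidate
    return None  # caller falls back to ~/.i3/i3status.conf


if __name__ == "__main__":
    print(find_config())
```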
gh_patches_debug_895 | rasdani/github-patches | git_diff | ESMCI__cime-993 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
scripts_regression_tests.py O_TestTestScheduler
This test fails with error SystemExit: ERROR: Leftover threads?
when run as part of the full scripts_regression_tests.py
but passes when run using ctest or when run as an individual test.
</issue>
<code>
[start of utils/python/CIME/code_checker.py]
1 """
2 Libraries for checking python code with pylint
3 """
4
5 from CIME.XML.standard_module_setup import *
6
7 from CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, is_python_executable
8
9 from multiprocessing.dummy import Pool as ThreadPool
10 from distutils.spawn import find_executable
11
12 logger = logging.getLogger(__name__)
13
14 ###############################################################################
15 def _run_pylint(on_file, interactive):
16 ###############################################################################
17 pylint = find_executable("pylint")
18
19 cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement"
20 cimeroot = get_cime_root()
21
22 if "scripts/Tools" in on_file:
23 cmd_options +=",relative-import"
24
25 # add init-hook option
26 cmd_options += " --init-hook='sys.path.extend((\"%s\",\"%s\"))'"%\
27 (os.path.join(cimeroot,"utils","python"),
28 os.path.join(cimeroot,"scripts","Tools"))
29
30 cmd = "%s %s %s" % (pylint, cmd_options, on_file)
31 logger.debug("pylint command is %s"%cmd)
32 stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)
33 if stat != 0:
34 if interactive:
35 logger.info("File %s has pylint problems, please fix\n Use command: %s" % (on_file, cmd))
36 logger.info(out + "\n" + err)
37 return (on_file, out + "\n" + err)
38 else:
39 if interactive:
40 logger.info("File %s has no pylint problems" % on_file)
41 return (on_file, "")
42
43 ###############################################################################
44 def _matches(file_path, file_ends):
45 ###############################################################################
46 for file_end in file_ends:
47 if file_path.endswith(file_end):
48 return True
49
50 return False
51
52 ###############################################################################
53 def _should_pylint_skip(filepath):
54 ###############################################################################
55 # TODO - get rid of this
56 list_of_directories_to_ignore = ("xmlconvertors", "pointclm", "point_clm", "tools", "machines", "apidocs", "unit_test")
57 for dir_to_skip in list_of_directories_to_ignore:
58 if dir_to_skip in filepath:
59 return True
60
61 return False
62
63 ###############################################################################
64 def get_all_checkable_files():
65 ###############################################################################
66 cimeroot = get_cime_root()
67 all_git_files = run_cmd_no_fail("git ls-files --full-name %s" % cimeroot, verbose=False).splitlines()
68 files_to_test = [item for item in all_git_files
69 if ((item.endswith(".py") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))]
70 return files_to_test
71
72 ###############################################################################
73 def check_code(files, num_procs=10, interactive=False):
74 ###############################################################################
75 """
76 Check all python files in the given directory
77
78 Returns True if all files had no problems
79 """
80 # Get list of files to check, we look to see if user-provided file argument
81 # is a valid file, if not, we search the repo for a file with similar name.
82 repo_files = run_cmd_no_fail('git ls-files --full-name %s' % get_cime_root(), verbose=False).splitlines()
83 files_to_check = []
84 if files:
85 for filearg in files:
86 if os.path.exists(filearg):
87 files_to_check.append(os.path.abspath(filearg))
88 else:
89 found = False
90 for repo_file in repo_files:
91 if repo_file.endswith(filearg):
92 found = True
93 files_to_check.append(repo_file) # could have multiple matches
94
95 if not found:
96 logger.warning("Could not find file matching argument '%s'" % filearg)
97 else:
98 # Check every python file
99 files_to_check = get_all_checkable_files()
100
101 expect(len(files_to_check) > 0, "No matching files found")
102
103 # No point in using more threads than files
104 if len(files_to_check) < num_procs:
105 num_procs = len(files_to_check)
106
107 pool = ThreadPool(num_procs)
108 results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)
109 return dict(results)
110
[end of utils/python/CIME/code_checker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/python/CIME/code_checker.py b/utils/python/CIME/code_checker.py
--- a/utils/python/CIME/code_checker.py
+++ b/utils/python/CIME/code_checker.py
@@ -106,4 +106,6 @@
pool = ThreadPool(num_procs)
results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)
+ pool.close()
+ pool.join()
return dict(results)
| {"golden_diff": "diff --git a/utils/python/CIME/code_checker.py b/utils/python/CIME/code_checker.py\n--- a/utils/python/CIME/code_checker.py\n+++ b/utils/python/CIME/code_checker.py\n@@ -106,4 +106,6 @@\n \n     pool = ThreadPool(num_procs)\n     results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)\n+    pool.close()\n+    pool.join()\n     return dict(results)\n", "issue": "scripts_regression_tests.py O_TestTestScheduler\nThis test fails with error SystemExit: ERROR: Leftover threads?\r\nwhen run as part of the full scripts_regression_tests.py \r\nbut passes when run using ctest or when run as an individual test.\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nLibraries for checking python code with pylint\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, is_python_executable\n\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom distutils.spawn import find_executable\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef _run_pylint(on_file, interactive):\n###############################################################################\n    pylint = find_executable(\"pylint\")\n\n    cmd_options = \" --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement\"\n    cimeroot = get_cime_root()\n\n    if \"scripts/Tools\" in on_file:\n        cmd_options +=\",relative-import\"\n\n    # add init-hook option\n    cmd_options += \" --init-hook='sys.path.extend((\\\"%s\\\",\\\"%s\\\"))'\"%\\\n        (os.path.join(cimeroot,\"utils\",\"python\"),\n         os.path.join(cimeroot,\"scripts\",\"Tools\"))\n\n    cmd = \"%s %s %s\" % (pylint, cmd_options, on_file)\n    logger.debug(\"pylint command is %s\"%cmd)\n    stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)\n    if stat != 0:\n        if interactive:\n            logger.info(\"File %s has pylint problems, please fix\\n    Use command: %s\" % (on_file, cmd))\n            logger.info(out + \"\\n\" + err)\n        return (on_file, out + \"\\n\" + err)\n    else:\n        if interactive:\n            logger.info(\"File %s has no pylint problems\" % on_file)\n        return (on_file, \"\")\n\n###############################################################################\ndef _matches(file_path, file_ends):\n###############################################################################\n    for file_end in file_ends:\n        if file_path.endswith(file_end):\n            return True\n\n    return False\n\n###############################################################################\ndef _should_pylint_skip(filepath):\n###############################################################################\n    # TODO - get rid of this\n    list_of_directories_to_ignore = (\"xmlconvertors\", \"pointclm\", \"point_clm\", \"tools\", \"machines\", \"apidocs\", \"unit_test\")\n    for dir_to_skip in list_of_directories_to_ignore:\n        if dir_to_skip in filepath:\n            return True\n\n    return False\n\n###############################################################################\ndef get_all_checkable_files():\n###############################################################################\n    cimeroot = get_cime_root()\n    all_git_files = run_cmd_no_fail(\"git ls-files --full-name %s\" % cimeroot, verbose=False).splitlines()\n    files_to_test = [item for item in all_git_files\n                     if ((item.endswith(\".py\") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))]\n    return files_to_test\n\n###############################################################################\ndef check_code(files, num_procs=10, interactive=False):\n###############################################################################\n    \"\"\"\n    Check all python files in the given directory\n\n    Returns True if all files had no problems\n    \"\"\"\n    # Get list of files to check, we look to see if user-provided file argument\n    # is a valid file, if not, we search the repo for a file with similar name.\n    repo_files = run_cmd_no_fail('git ls-files --full-name %s' % get_cime_root(), verbose=False).splitlines()\n    files_to_check = []\n    if files:\n        for filearg in files:\n            if os.path.exists(filearg):\n                files_to_check.append(os.path.abspath(filearg))\n            else:\n                found = False\n                for repo_file in repo_files:\n                    if repo_file.endswith(filearg):\n                        found = True\n                        files_to_check.append(repo_file) # could have multiple matches\n\n                if not found:\n                    logger.warning(\"Could not find file matching argument '%s'\" % filearg)\n    else:\n        # Check every python file\n        files_to_check = get_all_checkable_files()\n\n    expect(len(files_to_check) > 0, \"No matching files found\")\n\n    # No point in using more threads than files\n    if len(files_to_check) < num_procs:\n        num_procs = len(files_to_check)\n\n    pool = ThreadPool(num_procs)\n    results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)\n    return dict(results)\n", "path": "utils/python/CIME/code_checker.py"}]} | 1,754 | 99
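Editor's note: the two added lines are the entire fix. A `multiprocessing.dummy` pool keeps its worker threads alive until it is closed and joined, which is what the "Leftover threads?" check trips over. A runnable sketch of the corrected pattern with a stand-in task:

```python
# Thread-pool hygiene sketch: close() stops new work, join() waits for the
# worker threads to exit, so no threads outlive the call.
from multiprocessing.dummy import Pool as ThreadPool


def check_files(files, num_procs=4):
    num_procs = min(num_procs, len(files))  # no point in more threads than files
    pool = ThreadPool(num_procs)
    results = pool.map(lambda f: (f, len(f)), files)  # stand-in for _run_pylint
    pool.close()
    pool.join()
    return dict(results)


if __name__ == "__main__":
    print(check_files(["a.py", "b.py", "c.py"]))
```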
gh_patches_debug_19899 | rasdani/github-patches | git_diff | modin-project__modin-6123 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`pd.read_feather(file)` actually reads the file twice!
There's a really strange logic in our Feather dispatcher introduced a long time ago that reads the whole Feather file just to extract its column names:
https://github.com/modin-project/modin/blob/632d724adb013d6f4d18aa2a4f2faa9325d324e8/modin/core/io/column_stores/feather_dispatcher.py#L55-L64
Note that `.build_query_compiler(..)` launch parsing kernels that would also be reading the same parquet file but in a distributed way.
We should be using more smarter way to extract feather's metadata: https://issues.apache.org/jira/browse/ARROW-10344
</issue>
<code>
[start of modin/core/io/column_stores/feather_dispatcher.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 """Module houses `FeatherDispatcher` class, that is used for reading `.feather` files."""
15
16 from modin.core.io.column_stores.column_store_dispatcher import ColumnStoreDispatcher
17 from modin.utils import import_optional_dependency
18 from modin.core.io.file_dispatcher import OpenFile
19
20
21 class FeatherDispatcher(ColumnStoreDispatcher):
22 """Class handles utils for reading `.feather` files."""
23
24 @classmethod
25 def _read(cls, path, columns=None, **kwargs):
26 """
27 Read data from the file path, returning a query compiler.
28
29 Parameters
30 ----------
31 path : str or file-like object
32 The filepath of the feather file.
33 columns : array-like, optional
34 Columns to read from file. If not provided, all columns are read.
35 **kwargs : dict
36 `read_feather` function kwargs.
37
38 Returns
39 -------
40 BaseQueryCompiler
41 Query compiler with imported data for further processing.
42
43 Notes
44 -----
45 `PyArrow` engine and local files only are supported for now,
46 multi threading is set to False by default.
47 PyArrow feather is used. Please refer to the documentation here
48 https://arrow.apache.org/docs/python/api.html#feather-format
49 """
50 path = cls.get_path(path)
51 if columns is None:
52 import_optional_dependency(
53 "pyarrow", "pyarrow is required to read feather files."
54 )
55 from pyarrow.feather import read_feather
56
57 with OpenFile(
58 path,
59 **(kwargs.get("storage_options", None) or {}),
60 ) as file:
61 df = read_feather(file)
62 # pyarrow.feather.read_feather doesn't support columns as pandas.Index
63 columns = list(df.columns)
64 return cls.build_query_compiler(path, columns, use_threads=False)
65
[end of modin/core/io/column_stores/feather_dispatcher.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modin/core/io/column_stores/feather_dispatcher.py b/modin/core/io/column_stores/feather_dispatcher.py
--- a/modin/core/io/column_stores/feather_dispatcher.py
+++ b/modin/core/io/column_stores/feather_dispatcher.py
@@ -52,13 +52,15 @@
import_optional_dependency(
"pyarrow", "pyarrow is required to read feather files."
)
- from pyarrow.feather import read_feather
+ from pyarrow import ipc
with OpenFile(
path,
**(kwargs.get("storage_options", None) or {}),
) as file:
- df = read_feather(file)
- # pyarrow.feather.read_feather doesn't support columns as pandas.Index
- columns = list(df.columns)
+ # Opens the file to extract its metadata
+ reader = ipc.open_file(file)
+ # TODO: pyarrow's schema contains much more metadata than just column names, it also
+ # has dtypes and index information that we could use when building a dataframe
+ columns = reader.schema.names
return cls.build_query_compiler(path, columns, use_threads=False)
| {"golden_diff": "diff --git a/modin/core/io/column_stores/feather_dispatcher.py b/modin/core/io/column_stores/feather_dispatcher.py\n--- a/modin/core/io/column_stores/feather_dispatcher.py\n+++ b/modin/core/io/column_stores/feather_dispatcher.py\n@@ -52,13 +52,15 @@\n             import_optional_dependency(\n                 \"pyarrow\", \"pyarrow is required to read feather files.\"\n             )\n-            from pyarrow.feather import read_feather\n+            from pyarrow import ipc\n \n             with OpenFile(\n                 path,\n                 **(kwargs.get(\"storage_options\", None) or {}),\n             ) as file:\n-                df = read_feather(file)\n-            # pyarrow.feather.read_feather doesn't support columns as pandas.Index\n-            columns = list(df.columns)\n+                # Opens the file to extract its metadata\n+                reader = ipc.open_file(file)\n+            # TODO: pyarrow's schema contains much more metadata than just column names, it also\n+            # has dtypes and index information that we could use when building a dataframe\n+            columns = reader.schema.names\n         return cls.build_query_compiler(path, columns, use_threads=False)\n", "issue": "`pd.read_feather(file)` actually reads the file twice!\nThere's a really strange logic in our Feather dispatcher introduced a long time ago that reads the whole Feather file just to extract its column names:\r\nhttps://github.com/modin-project/modin/blob/632d724adb013d6f4d18aa2a4f2faa9325d324e8/modin/core/io/column_stores/feather_dispatcher.py#L55-L64\r\n\r\nNote that `.build_query_compiler(..)` launch parsing kernels that would also be reading the same parquet file but in a distributed way.\r\n\r\nWe should be using more smarter way to extract feather's metadata: https://issues.apache.org/jira/browse/ARROW-10344\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership.  The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License.  You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module houses `FeatherDispatcher` class, that is used for reading `.feather` files.\"\"\"\n\nfrom modin.core.io.column_stores.column_store_dispatcher import ColumnStoreDispatcher\nfrom modin.utils import import_optional_dependency\nfrom modin.core.io.file_dispatcher import OpenFile\n\n\nclass FeatherDispatcher(ColumnStoreDispatcher):\n    \"\"\"Class handles utils for reading `.feather` files.\"\"\"\n\n    @classmethod\n    def _read(cls, path, columns=None, **kwargs):\n        \"\"\"\n        Read data from the file path, returning a query compiler.\n\n        Parameters\n        ----------\n        path : str or file-like object\n            The filepath of the feather file.\n        columns : array-like, optional\n            Columns to read from file. If not provided, all columns are read.\n        **kwargs : dict\n            `read_feather` function kwargs.\n\n        Returns\n        -------\n        BaseQueryCompiler\n            Query compiler with imported data for further processing.\n\n        Notes\n        -----\n        `PyArrow` engine and local files only are supported for now,\n        multi threading is set to False by default.\n        PyArrow feather is used. Please refer to the documentation here\n        https://arrow.apache.org/docs/python/api.html#feather-format\n        \"\"\"\n        path = cls.get_path(path)\n        if columns is None:\n            import_optional_dependency(\n                \"pyarrow\", \"pyarrow is required to read feather files.\"\n            )\n            from pyarrow.feather import read_feather\n\n            with OpenFile(\n                path,\n                **(kwargs.get(\"storage_options\", None) or {}),\n            ) as file:\n                df = read_feather(file)\n            # pyarrow.feather.read_feather doesn't support columns as pandas.Index\n            columns = list(df.columns)\n        return cls.build_query_compiler(path, columns, use_threads=False)\n", "path": "modin/core/io/column_stores/feather_dispatcher.py"}]} | 1,381 | 259
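Editor's note: the fix replaces a full `read_feather` materialization with a metadata-only schema read. Below is a self-contained sketch, assuming `pyarrow` is installed; it writes a tiny demo file first so it can run anywhere, and it relies on Feather v2 (the default) being the Arrow IPC file format.

```python
# Metadata-only column lookup: ipc.open_file() reads the footer and schema
# without materializing any record batches.
import pyarrow as pa
from pyarrow import feather, ipc


def feather_columns(path):
    with open(path, "rb") as f:
        reader = ipc.open_file(f)
        return reader.schema.names


if __name__ == "__main__":
    feather.write_feather(pa.table({"a": [1, 2], "b": ["x", "y"]}), "demo.feather")
    print(feather_columns("demo.feather"))  # -> ['a', 'b']
```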
gh_patches_debug_13994 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-2057 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Tesseract checks function doesn't recognize Chinese language (and some other languages) correctly
### Description
Good morning, I am new user of paperless-ngx. I tried to start a container using the lazy installation scripts.
`bash -c "$(curl -L https://raw.githubusercontent.com/paperless-ngx/paperless-ngx/main/install-paperless-ngx.sh)"`
**1. Problem Description**: when I set `PAPERLESS_OCR_LANGUAGE=eng+chi-sim`, the webserver cannot start correctly because it fails the tesseract language checks.
**2. Reason of the problem**:
In `docker/docker-entrypoint.sh`, the tesseract language packages are installed through the following command: `pkg="tesseract-ocr-$lang"`. And the naming could be found in [Ubuntu tesseract-ocr-all](https://packages.ubuntu.com/focal/tesseract-ocr-all) for example. So to install the Chinese Simplified package, you need to specify `chi-sim` in `PAPERLESS_OCR_LANGUAGES`.
However, when `-ngx` checks if the tesseract language package exists, it calls `tesseract --list-langs`.
I logged in the web server container using `docker exec -it paperless-webserver-1 bash`.
```
root@c95f0258d544:/usr/src/paperless/src# tesseract --list-langs
List of available languages (7):
chi_sim
deu
eng
fra
ita
osd
spa
```
Here the `chi-sim` appears as `chi_sim`
**3. Solution**: for users using some language, like Chinese, Korean or Arabic, etc.
`PAPERLESS_OCR_LANGUAGES`: this env parameter tells which tesseract-ocr packages to install
`PAPERLESS_OCR_LANGUAGE`: this env parameter tells which language in `tesseract --list-langs` will be used for OCR.
In this Chinese Simplified language situation, after setting:
```
PAPERLESS_OCR_LANGUAGE=eng+chi_sim
PAPERLESS_OCR_LANGUAGES=eng chi-sim
```
the webserver container could start correctly.
**4. Potential Update**
It's better to make `get_tesseract_langs()` function's output (in `paperless-ngx/src/paperless_tesseract/checks.py`) consistent with the `apt-get install tesseract-ocr-$lang`.
### Steps to reproduce
1. Use the lazy installer `bash -c "$(curl -L https://raw.githubusercontent.com/paperless-ngx/paperless-ngx/main/install-paperless-ngx.sh)"` or set `PAPERLESS_OCR_LANGUAGE=eng+chi-sim` and `PAPERLESS_OCR_LANGUAGES=eng chi-sim`
2. The server cannot pass the language checks
### Webserver logs
```bash
paperless-webserver-1 | Paperless-ngx docker container starting...
paperless-webserver-1 | Installing languages...
paperless-webserver-1 | Hit:1 http://deb.debian.org/debian bullseye InRelease
paperless-webserver-1 | Hit:2 http://deb.debian.org/debian-security bullseye-security InRelease
paperless-webserver-1 | Hit:3 http://deb.debian.org/debian bullseye-updates InRelease
paperless-webserver-1 | Reading package lists...
paperless-webserver-1 | Package tesseract-ocr-eng already installed!
paperless-webserver-1 | Package tesseract-ocr-chi-sim already installed!
paperless-webserver-1 | Creating directory /tmp/paperless
paperless-webserver-1 | Adjusting permissions of paperless files. This may take a while.
paperless-webserver-1 | Waiting for PostgreSQL to start...
paperless-webserver-1 | Waiting for Redis...
paperless-webserver-1 | Connected to Redis broker.
paperless-webserver-1 | Apply database migrations...
paperless-webserver-1 | SystemCheckError: System check identified some issues:
paperless-webserver-1 |
paperless-webserver-1 | ERRORS:
paperless-
```
### Browser logs
_No response_
### Paperless-ngx version
1.9.2
### Host OS
Ubuntu 22.04
### Installation method
Docker - official image
### Browser
_No response_
### Configuration changes
_No response_
### Other
_No response_
</issue>
<code>
[start of src/paperless_tesseract/checks.py]
1 import subprocess
2
3 from django.conf import settings
4 from django.core.checks import Error
5 from django.core.checks import register
6 from django.core.checks import Warning
7
8
9 def get_tesseract_langs():
10 with subprocess.Popen(["tesseract", "--list-langs"], stdout=subprocess.PIPE) as p:
11 stdout, stderr = p.communicate()
12
13 return stdout.decode().strip().split("\n")[1:]
14
15
16 @register()
17 def check_default_language_available(app_configs, **kwargs):
18 installed_langs = get_tesseract_langs()
19
20 if not settings.OCR_LANGUAGE:
21 return [
22 Warning(
23 "No OCR language has been specified with PAPERLESS_OCR_LANGUAGE. "
24 "This means that tesseract will fallback to english.",
25 ),
26 ]
27
28 specified_langs = settings.OCR_LANGUAGE.split("+")
29
30 for lang in specified_langs:
31 if lang not in installed_langs:
32 return [
33 Error(
34 f"The selected ocr language {lang} is "
35 f"not installed. Paperless cannot OCR your documents "
36 f"without it. Please fix PAPERLESS_OCR_LANGUAGE.",
37 ),
38 ]
39
40 return []
41
[end of src/paperless_tesseract/checks.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/paperless_tesseract/checks.py b/src/paperless_tesseract/checks.py
--- a/src/paperless_tesseract/checks.py
+++ b/src/paperless_tesseract/checks.py
@@ -1,3 +1,4 @@
+import shutil
import subprocess
from django.conf import settings
@@ -7,10 +8,16 @@
def get_tesseract_langs():
- with subprocess.Popen(["tesseract", "--list-langs"], stdout=subprocess.PIPE) as p:
- stdout, stderr = p.communicate()
+ proc = subprocess.run(
+ [shutil.which("tesseract"), "--list-langs"],
+ capture_output=True,
+ )
- return stdout.decode().strip().split("\n")[1:]
+ # Decode bytes to string, split on newlines, trim out the header
+ proc_lines = proc.stdout.decode("utf8", errors="ignore").strip().split("\n")[1:]
+
+ # Replace _ with - to convert two part languages to the expected code
+ return [x.replace("_", "-") for x in proc_lines]
@register()
| {"golden_diff": "diff --git a/src/paperless_tesseract/checks.py b/src/paperless_tesseract/checks.py\n--- a/src/paperless_tesseract/checks.py\n+++ b/src/paperless_tesseract/checks.py\n@@ -1,3 +1,4 @@\n+import shutil\n import subprocess\n \n from django.conf import settings\n@@ -7,10 +8,16 @@\n \n \n def get_tesseract_langs():\n- with subprocess.Popen([\"tesseract\", \"--list-langs\"], stdout=subprocess.PIPE) as p:\n- stdout, stderr = p.communicate()\n+ proc = subprocess.run(\n+ [shutil.which(\"tesseract\"), \"--list-langs\"],\n+ capture_output=True,\n+ )\n \n- return stdout.decode().strip().split(\"\\n\")[1:]\n+ # Decode bytes to string, split on newlines, trim out the header\n+ proc_lines = proc.stdout.decode(\"utf8\", errors=\"ignore\").strip().split(\"\\n\")[1:]\n+\n+ # Replace _ with - to convert two part languages to the expected code\n+ return [x.replace(\"_\", \"-\") for x in proc_lines]\n \n \n @register()\n", "issue": "[BUG] Tesseract checks function doesn't recognize Chinese language (and some other languages) correctly\n### Description\r\n\r\nGood morning, I am new user of paperless-ngx. I tried to start a container using the lazy installation scripts.\r\n`bash -c \"$(curl -L https://raw.githubusercontent.com/paperless-ngx/paperless-ngx/main/install-paperless-ngx.sh)\"`\r\n\r\n**1. Problem Description**: when I set `PAPERLESS_OCR_LANGUAGE=eng+chi-sim`, the webserver cannot start correctly because it fails the tesseract language checks.\r\n\r\n**2. Reason of the problem**: \r\nIn `docker/docker-entrypoint.sh`, the tesseract language packages are installed through the following command: `pkg=\"tesseract-ocr-$lang\"`. And the naming could be found in [Ubuntu tesseract-ocr-all](https://packages.ubuntu.com/focal/tesseract-ocr-all) for example. So to install the Chinese Simplified package, you need to specify `chi-sim` in `PAPERLESS_OCR_LANGUAGES`.\r\n\r\nHowever, when `-ngx` checks if the tesseract language package exists, it calls `tesseract --list-langs`. \r\nI logged in the web server container using `docker exec -it paperless-webserver-1 bash`.\r\n```\r\nroot@c95f0258d544:/usr/src/paperless/src# tesseract --list-langs\r\nList of available languages (7):\r\nchi_sim\r\ndeu\r\neng\r\nfra\r\nita\r\nosd\r\nspa\r\n```\r\nHere the `chi-sim` appears as `chi_sim`\r\n\r\n**3. Solution**: for users using some language, like Chinese, Korean or Arabic, etc.\r\n`PAPERLESS_OCR_LANGUAGES`: this env parameter tells which tesseract-ocr packages to install\r\n`PAPERLESS_OCR_LANGUAGE`: this env parameter tells which language in `tesseract --list-langs` will be used for OCR.\r\n\r\nIn this Chinese Simplified language situation, after setting:\r\n```\r\nPAPERLESS_OCR_LANGUAGE=eng+chi_sim\r\nPAPERLESS_OCR_LANGUAGES=eng chi-sim\r\n```\r\nthe webserver container could start correctly.\r\n\r\n**4. Potential Update**\r\nIt's better to make `get_tesseract_langs()` function's output (in `paperless-ngx/src/paperless_tesseract/checks.py`) consistent with the `apt-get install tesseract-ocr-$lang`.\r\n\r\n\r\n### Steps to reproduce\r\n\r\n1. Use the lazy installer `bash -c \"$(curl -L https://raw.githubusercontent.com/paperless-ngx/paperless-ngx/main/install-paperless-ngx.sh)\"` or set `PAPERLESS_OCR_LANGUAGE=eng+chi-sim` and `PAPERLESS_OCR_LANGUAGES=eng chi-sim`\r\n2. 
The server cannot pass the language checks\r\n\r\n\r\n### Webserver logs\r\n\r\n```bash\r\npaperless-webserver-1 | Paperless-ngx docker container starting...\r\npaperless-webserver-1 | Installing languages...\r\npaperless-webserver-1 | Hit:1 http://deb.debian.org/debian bullseye InRelease\r\npaperless-webserver-1 | Hit:2 http://deb.debian.org/debian-security bullseye-security InRelease\r\npaperless-webserver-1 | Hit:3 http://deb.debian.org/debian bullseye-updates InRelease\r\npaperless-webserver-1 | Reading package lists...\r\npaperless-webserver-1 | Package tesseract-ocr-eng already installed!\r\npaperless-webserver-1 | Package tesseract-ocr-chi-sim already installed!\r\npaperless-webserver-1 | Creating directory /tmp/paperless\r\npaperless-webserver-1 | Adjusting permissions of paperless files. This may take a while.\r\npaperless-webserver-1 | Waiting for PostgreSQL to start...\r\npaperless-webserver-1 | Waiting for Redis...\r\npaperless-webserver-1 | Connected to Redis broker.\r\npaperless-webserver-1 | Apply database migrations...\r\npaperless-webserver-1 | SystemCheckError: System check identified some issues:\r\npaperless-webserver-1 | \r\npaperless-webserver-1 | ERRORS:\r\npaperless-\r\n```\r\n\r\n\r\n### Browser logs\r\n\r\n_No response_\r\n\r\n### Paperless-ngx version\r\n\r\n1.9.2\r\n\r\n### Host OS\r\n\r\nUbuntu 22.04\r\n\r\n### Installation method\r\n\r\nDocker - official image\r\n\r\n### Browser\r\n\r\n_No response_\r\n\r\n### Configuration changes\r\n\r\n_No response_\r\n\r\n### Other\r\n\r\n_No response_\n", "before_files": [{"content": "import subprocess\n\nfrom django.conf import settings\nfrom django.core.checks import Error\nfrom django.core.checks import register\nfrom django.core.checks import Warning\n\n\ndef get_tesseract_langs():\n with subprocess.Popen([\"tesseract\", \"--list-langs\"], stdout=subprocess.PIPE) as p:\n stdout, stderr = p.communicate()\n\n return stdout.decode().strip().split(\"\\n\")[1:]\n\n\n@register()\ndef check_default_language_available(app_configs, **kwargs):\n installed_langs = get_tesseract_langs()\n\n if not settings.OCR_LANGUAGE:\n return [\n Warning(\n \"No OCR language has been specified with PAPERLESS_OCR_LANGUAGE. \"\n \"This means that tesseract will fallback to english.\",\n ),\n ]\n\n specified_langs = settings.OCR_LANGUAGE.split(\"+\")\n\n for lang in specified_langs:\n if lang not in installed_langs:\n return [\n Error(\n f\"The selected ocr language {lang} is \"\n f\"not installed. Paperless cannot OCR your documents \"\n f\"without it. Please fix PAPERLESS_OCR_LANGUAGE.\",\n ),\n ]\n\n return []\n", "path": "src/paperless_tesseract/checks.py"}]} | 1,819 | 249 |
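The shipped fix above boils down to one normalization between the two naming schemes (`chi_sim` from `tesseract --list-langs` versus `chi-sim` from the Debian package names). A condensed, standalone sketch, assuming a `tesseract` binary is on the PATH:

```python
import shutil
import subprocess

def get_tesseract_langs() -> list:
    proc = subprocess.run(
        [shutil.which("tesseract"), "--list-langs"],
        capture_output=True,
    )
    # Drop the "List of available languages (N):" header line,
    # then map chi_sim -> chi-sim so both naming schemes agree.
    lines = proc.stdout.decode("utf8", errors="ignore").strip().split("\n")[1:]
    return [lang.replace("_", "-") for lang in lines]
```

With that mapping in place, `PAPERLESS_OCR_LANGUAGE=eng+chi-sim` passes the check against the same codes used by `PAPERLESS_OCR_LANGUAGES`.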
gh_patches_debug_37785 | rasdani/github-patches | git_diff | DataDog__dd-agent-1241 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Marathon >= 0.7 failing
The Marathon plugin checks the `taskRateLimit` attribute from the Marathon 0.6 API, which has been removed: https://github.com/mesosphere/marathon/blob/master/docs/docs/upgrade/06xto070.md#task-rate-limit
You can see the dependency here:
https://github.com/DataDog/dd-agent/blob/73dd75035f1982d31818bb8a4a2a6cc856cf8bb7/checks.d/marathon.py#L30
Using Marathon v0.7.6, the `dd-agent info` output looks like this:
```
marathon
--------
- instance #0 [ERROR]: KeyError('taskRateLimit',)
- Collected 1 metric, 0 events & 1 service check
```
</issue>
<code>
[start of checks.d/marathon.py]
1 # stdlib
2 import time
3 from hashlib import md5
4 import urllib2
5
6 # project
7 from checks import AgentCheck
8 from util import headers
9
10 # 3rd party
11 import simplejson as json
12 import requests
13
14 class Marathon(AgentCheck):
15 def check(self, instance):
16 if 'url' not in instance:
17 raise Exception('Marathon instance missing "url" value.')
18
19 # Load values from the instance config
20 url = instance['url']
21 instance_tags = instance.get('tags', [])
22 default_timeout = self.init_config.get('default_timeout', 5)
23 timeout = float(instance.get('timeout', default_timeout))
24
25 response = self.get_v2_apps(url, timeout)
26 if response is not None:
27 self.gauge('marathon.apps', len(response['apps']), tags=instance_tags)
28 for app in response['apps']:
29 tags = ['app_id:' + app['id'], 'version:' + app['version']] + instance_tags
30 for attr in ['taskRateLimit','instances','cpus','mem','tasksStaged','tasksRunning']:
31 self.gauge('marathon.' + attr, app[attr], tags=tags)
32 versions_reply = self.get_v2_app_versions(url, app['id'], timeout)
33 if versions_reply is not None:
34 self.gauge('marathon.versions', len(versions_reply['versions']), tags=tags)
35
36 def get_v2_apps(self, url, timeout):
37 # Use a hash of the URL as an aggregation key
38 aggregation_key = md5(url).hexdigest()
39 try:
40 r = requests.get(url + "/v2/apps", timeout=timeout)
41 except requests.exceptions.Timeout as e:
42 # If there's a timeout
43 self.timeout_event(url, timeout, aggregation_key)
44 raise Exception("Timeout when hitting %s" % url)
45
46 if r.status_code != 200:
47 self.status_code_event(url, r, aggregation_key)
48 raise Exception("Got %s when hitting %s" % (r.status_code, url))
49
50 # Condition for request v1.x backward compatibility
51 if hasattr(r.json, '__call__'):
52 return r.json()
53 else:
54 return r.json
55
56 def get_v2_app_versions(self, url, app_id, timeout):
57 # Use a hash of the URL as an aggregation key
58 aggregation_key = md5(url).hexdigest()
59
60 try:
61 r = requests.get(url + "/v2/apps/" + app_id + "/versions", timeout=timeout)
62 except requests.exceptions.Timeout as e:
63 # If there's a timeout
64 self.timeout_event(url, timeout, aggregation_key)
65 self.warning("Timeout when hitting %s" % url)
66 return None
67
68 if r.status_code != 200:
69 self.status_code_event(url, r, aggregation_key)
70 self.warning("Got %s when hitting %s" % (r.status_code, url))
71 return None
72
73 return r.json()
74
75 def timeout_event(self, url, timeout, aggregation_key):
76 self.event({
77 'timestamp': int(time.time()),
78 'event_type': 'http_check',
79 'msg_title': 'URL timeout',
80 'msg_text': '%s timed out after %s seconds.' % (url, timeout),
81 'aggregation_key': aggregation_key
82 })
83
84 def status_code_event(self, url, r, aggregation_key):
85 self.event({
86 'timestamp': int(time.time()),
87 'event_type': 'http_check',
88 'msg_title': 'Invalid reponse code for %s' % url,
89 'msg_text': '%s returned a status of %s' % (url, r.status_code),
90 'aggregation_key': aggregation_key
91 })
92
[end of checks.d/marathon.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checks.d/marathon.py b/checks.d/marathon.py
--- a/checks.d/marathon.py
+++ b/checks.d/marathon.py
@@ -1,11 +1,9 @@
# stdlib
import time
from hashlib import md5
-import urllib2
# project
from checks import AgentCheck
-from util import headers
# 3rd party
import simplejson as json
@@ -27,8 +25,9 @@
self.gauge('marathon.apps', len(response['apps']), tags=instance_tags)
for app in response['apps']:
tags = ['app_id:' + app['id'], 'version:' + app['version']] + instance_tags
- for attr in ['taskRateLimit','instances','cpus','mem','tasksStaged','tasksRunning']:
- self.gauge('marathon.' + attr, app[attr], tags=tags)
+ for attr in ['taskRateLimit', 'instances', 'cpus', 'mem', 'tasksStaged', 'tasksRunning', 'backoffSeconds', 'backoffFactor']:
+ if attr in app:
+ self.gauge('marathon.' + attr, app[attr], tags=tags)
versions_reply = self.get_v2_app_versions(url, app['id'], timeout)
if versions_reply is not None:
self.gauge('marathon.versions', len(versions_reply['versions']), tags=tags)
@@ -38,7 +37,7 @@
aggregation_key = md5(url).hexdigest()
try:
r = requests.get(url + "/v2/apps", timeout=timeout)
- except requests.exceptions.Timeout as e:
+ except requests.exceptions.Timeout:
# If there's a timeout
self.timeout_event(url, timeout, aggregation_key)
raise Exception("Timeout when hitting %s" % url)
@@ -56,10 +55,10 @@
def get_v2_app_versions(self, url, app_id, timeout):
# Use a hash of the URL as an aggregation key
aggregation_key = md5(url).hexdigest()
-
+
try:
r = requests.get(url + "/v2/apps/" + app_id + "/versions", timeout=timeout)
- except requests.exceptions.Timeout as e:
+ except requests.exceptions.Timeout:
# If there's a timeout
self.timeout_event(url, timeout, aggregation_key)
self.warning("Timeout when hitting %s" % url)
| {"golden_diff": "diff --git a/checks.d/marathon.py b/checks.d/marathon.py\n--- a/checks.d/marathon.py\n+++ b/checks.d/marathon.py\n@@ -1,11 +1,9 @@\n # stdlib\n import time\n from hashlib import md5\n-import urllib2\n \n # project\n from checks import AgentCheck\n-from util import headers\n \n # 3rd party\n import simplejson as json\n@@ -27,8 +25,9 @@\n self.gauge('marathon.apps', len(response['apps']), tags=instance_tags)\n for app in response['apps']:\n tags = ['app_id:' + app['id'], 'version:' + app['version']] + instance_tags\n- for attr in ['taskRateLimit','instances','cpus','mem','tasksStaged','tasksRunning']:\n- self.gauge('marathon.' + attr, app[attr], tags=tags)\n+ for attr in ['taskRateLimit', 'instances', 'cpus', 'mem', 'tasksStaged', 'tasksRunning', 'backoffSeconds', 'backoffFactor']:\n+ if attr in app:\n+ self.gauge('marathon.' + attr, app[attr], tags=tags)\n versions_reply = self.get_v2_app_versions(url, app['id'], timeout)\n if versions_reply is not None:\n self.gauge('marathon.versions', len(versions_reply['versions']), tags=tags)\n@@ -38,7 +37,7 @@\n aggregation_key = md5(url).hexdigest()\n try:\n r = requests.get(url + \"/v2/apps\", timeout=timeout)\n- except requests.exceptions.Timeout as e:\n+ except requests.exceptions.Timeout:\n # If there's a timeout\n self.timeout_event(url, timeout, aggregation_key)\n raise Exception(\"Timeout when hitting %s\" % url)\n@@ -56,10 +55,10 @@\n def get_v2_app_versions(self, url, app_id, timeout):\n # Use a hash of the URL as an aggregation key\n aggregation_key = md5(url).hexdigest()\n- \n+\n try:\n r = requests.get(url + \"/v2/apps/\" + app_id + \"/versions\", timeout=timeout)\n- except requests.exceptions.Timeout as e:\n+ except requests.exceptions.Timeout:\n # If there's a timeout\n self.timeout_event(url, timeout, aggregation_key)\n self.warning(\"Timeout when hitting %s\" % url)\n", "issue": "Marathon >= 0.7 failing\nThe Marathon plugin checks the TaskRateLimit attribute from the Marathon 0.6 API version, which has been removed https://github.com/mesosphere/marathon/blob/master/docs/docs/upgrade/06xto070.md#task-rate-limit\n\nyou can see the dependency here:\nhttps://github.com/DataDog/dd-agent/blob/73dd75035f1982d31818bb8a4a2a6cc856cf8bb7/checks.d/marathon.py#L30\n\nusing marathon v0.7.6, the `dd-agent info` looks like this:\n\n```\n marathon\n --------\n - instance #0 [ERROR]: KeyError('taskRateLimit',)\n - Collected 1 metric, 0 events & 1 service check\n```\n\n", "before_files": [{"content": "# stdlib\nimport time\nfrom hashlib import md5\nimport urllib2\n\n# project\nfrom checks import AgentCheck\nfrom util import headers\n\n# 3rd party\nimport simplejson as json\nimport requests\n\nclass Marathon(AgentCheck):\n def check(self, instance):\n if 'url' not in instance:\n raise Exception('Marathon instance missing \"url\" value.')\n\n # Load values from the instance config\n url = instance['url']\n instance_tags = instance.get('tags', [])\n default_timeout = self.init_config.get('default_timeout', 5)\n timeout = float(instance.get('timeout', default_timeout))\n\n response = self.get_v2_apps(url, timeout)\n if response is not None:\n self.gauge('marathon.apps', len(response['apps']), tags=instance_tags)\n for app in response['apps']:\n tags = ['app_id:' + app['id'], 'version:' + app['version']] + instance_tags\n for attr in ['taskRateLimit','instances','cpus','mem','tasksStaged','tasksRunning']:\n self.gauge('marathon.' 
+ attr, app[attr], tags=tags)\n versions_reply = self.get_v2_app_versions(url, app['id'], timeout)\n if versions_reply is not None:\n self.gauge('marathon.versions', len(versions_reply['versions']), tags=tags)\n\n def get_v2_apps(self, url, timeout):\n # Use a hash of the URL as an aggregation key\n aggregation_key = md5(url).hexdigest()\n try:\n r = requests.get(url + \"/v2/apps\", timeout=timeout)\n except requests.exceptions.Timeout as e:\n # If there's a timeout\n self.timeout_event(url, timeout, aggregation_key)\n raise Exception(\"Timeout when hitting %s\" % url)\n\n if r.status_code != 200:\n self.status_code_event(url, r, aggregation_key)\n raise Exception(\"Got %s when hitting %s\" % (r.status_code, url))\n\n # Condition for request v1.x backward compatibility\n if hasattr(r.json, '__call__'):\n return r.json()\n else:\n return r.json\n\n def get_v2_app_versions(self, url, app_id, timeout):\n # Use a hash of the URL as an aggregation key\n aggregation_key = md5(url).hexdigest()\n \n try:\n r = requests.get(url + \"/v2/apps/\" + app_id + \"/versions\", timeout=timeout)\n except requests.exceptions.Timeout as e:\n # If there's a timeout\n self.timeout_event(url, timeout, aggregation_key)\n self.warning(\"Timeout when hitting %s\" % url)\n return None\n\n if r.status_code != 200:\n self.status_code_event(url, r, aggregation_key)\n self.warning(\"Got %s when hitting %s\" % (r.status_code, url))\n return None\n\n return r.json()\n\n def timeout_event(self, url, timeout, aggregation_key):\n self.event({\n 'timestamp': int(time.time()),\n 'event_type': 'http_check',\n 'msg_title': 'URL timeout',\n 'msg_text': '%s timed out after %s seconds.' % (url, timeout),\n 'aggregation_key': aggregation_key\n })\n\n def status_code_event(self, url, r, aggregation_key):\n self.event({\n 'timestamp': int(time.time()),\n 'event_type': 'http_check',\n 'msg_title': 'Invalid reponse code for %s' % url,\n 'msg_text': '%s returned a status of %s' % (url, r.status_code),\n 'aggregation_key': aggregation_key\n })\n", "path": "checks.d/marathon.py"}]} | 1,695 | 532 |
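The essence of the fix above is guarding each attribute lookup instead of assuming the Marathon 0.6 schema. A minimal plain-Python illustration of the pattern; `emit_gauge` is a hypothetical stand-in for the agent's `self.gauge`:

```python
APP_ATTRS = [
    "taskRateLimit", "instances", "cpus", "mem",
    "tasksStaged", "tasksRunning", "backoffSeconds", "backoffFactor",
]

def emit_app_metrics(app, emit_gauge):
    # Marathon >= 0.7 dropped taskRateLimit, so only emit the
    # attributes that this API version actually returned.
    for attr in APP_ATTRS:
        if attr in app:
            emit_gauge("marathon." + attr, app[attr])

# Example: a 0.7-style app payload without taskRateLimit.
emit_app_metrics({"cpus": 0.5, "mem": 128}, lambda name, value: print(name, value))
```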
gh_patches_debug_27684 | rasdani/github-patches | git_diff | TheAlgorithms__Python-7406 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update gaussian_naive_bayes.py
This change uses `seaborn.heatmap` to plot the confusion matrix instead of the `plot_confusion_matrix` method from the `sklearn.metrics` module.
### Describe your change:
Adding to the previous update: to avoid a warning that the `plot_confusion_matrix` method from `sklearn.metrics` throws, I have used seaborn's `heatmap`, which takes as an argument the confusion matrix obtained from the `confusion_matrix` method in the `sklearn.metrics` library.
* [ ] Add an algorithm?
* [x] Fix a bug or typo in an existing algorithm?
* [ ] Documentation change?
### Checklist:
* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).
* [x] This pull request is all my own work -- I have not plagiarized.
* [x] I know that pull requests will not be merged if they fail the automated tests.
* [x] This PR only changes one algorithm file. To ease review, please open separate PRs for separate algorithms.
* [x] All new Python files are placed inside an existing directory.
* [x] All filenames are in all lowercase characters with no spaces or dashes.
* [x] All functions and variable names follow Python naming conventions.
* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).
* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.
* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.
* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.
</issue>
<code>
[start of machine_learning/gaussian_naive_bayes.py]
1 # Gaussian Naive Bayes Example
2 from matplotlib import pyplot as plt
3 from sklearn.datasets import load_iris
4 from sklearn.metrics import plot_confusion_matrix
5 from sklearn.model_selection import train_test_split
6 from sklearn.naive_bayes import GaussianNB
7
8
9 def main():
10
11 """
12 Gaussian Naive Bayes Example using sklearn function.
13 Iris type dataset is used to demonstrate algorithm.
14 """
15
16 # Load Iris dataset
17 iris = load_iris()
18
19 # Split dataset into train and test data
20 x = iris["data"] # features
21 y = iris["target"]
22 x_train, x_test, y_train, y_test = train_test_split(
23 x, y, test_size=0.3, random_state=1
24 )
25
26 # Gaussian Naive Bayes
27 nb_model = GaussianNB()
28 nb_model.fit(x_train, y_train)
29
30 # Display Confusion Matrix
31 plot_confusion_matrix(
32 nb_model,
33 x_test,
34 y_test,
35 display_labels=iris["target_names"],
36 cmap="Blues",
37 normalize="true",
38 )
39 plt.title("Normalized Confusion Matrix - IRIS Dataset")
40 plt.show()
41
42
43 if __name__ == "__main__":
44 main()
45
[end of machine_learning/gaussian_naive_bayes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/machine_learning/gaussian_naive_bayes.py b/machine_learning/gaussian_naive_bayes.py
--- a/machine_learning/gaussian_naive_bayes.py
+++ b/machine_learning/gaussian_naive_bayes.py
@@ -1,7 +1,9 @@
# Gaussian Naive Bayes Example
+import time
+
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
-from sklearn.metrics import plot_confusion_matrix
+from sklearn.metrics import accuracy_score, plot_confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
@@ -25,7 +27,9 @@
# Gaussian Naive Bayes
nb_model = GaussianNB()
- nb_model.fit(x_train, y_train)
+ time.sleep(2.9)
+ model_fit = nb_model.fit(x_train, y_train)
+ y_pred = model_fit.predict(x_test) # Predictions on the test set
# Display Confusion Matrix
plot_confusion_matrix(
@@ -33,12 +37,16 @@
x_test,
y_test,
display_labels=iris["target_names"],
- cmap="Blues",
+ cmap="Blues", # although, Greys_r has a better contrast...
normalize="true",
)
plt.title("Normalized Confusion Matrix - IRIS Dataset")
plt.show()
+ time.sleep(1.8)
+ final_accuracy = 100 * accuracy_score(y_true=y_test, y_pred=y_pred)
+ print(f"The overall accuracy of the model is: {round(final_accuracy, 2)}%")
+
if __name__ == "__main__":
main()
| {"golden_diff": "diff --git a/machine_learning/gaussian_naive_bayes.py b/machine_learning/gaussian_naive_bayes.py\n--- a/machine_learning/gaussian_naive_bayes.py\n+++ b/machine_learning/gaussian_naive_bayes.py\n@@ -1,7 +1,9 @@\n # Gaussian Naive Bayes Example\n+import time\n+\n from matplotlib import pyplot as plt\n from sklearn.datasets import load_iris\n-from sklearn.metrics import plot_confusion_matrix\n+from sklearn.metrics import accuracy_score, plot_confusion_matrix\n from sklearn.model_selection import train_test_split\n from sklearn.naive_bayes import GaussianNB\n \n@@ -25,7 +27,9 @@\n \n # Gaussian Naive Bayes\n nb_model = GaussianNB()\n- nb_model.fit(x_train, y_train)\n+ time.sleep(2.9)\n+ model_fit = nb_model.fit(x_train, y_train)\n+ y_pred = model_fit.predict(x_test) # Predictions on the test set\n \n # Display Confusion Matrix\n plot_confusion_matrix(\n@@ -33,12 +37,16 @@\n x_test,\n y_test,\n display_labels=iris[\"target_names\"],\n- cmap=\"Blues\",\n+ cmap=\"Blues\", # although, Greys_r has a better contrast...\n normalize=\"true\",\n )\n plt.title(\"Normalized Confusion Matrix - IRIS Dataset\")\n plt.show()\n \n+ time.sleep(1.8)\n+ final_accuracy = 100 * accuracy_score(y_true=y_test, y_pred=y_pred)\n+ print(f\"The overall accuracy of the model is: {round(final_accuracy, 2)}%\")\n+\n \n if __name__ == \"__main__\":\n main()\n", "issue": "Update gaussian_naive_bayes.py\nUsing the seaborn.heatmap library to plot the confusion matrix instead of the plot_confusion_matrix method from the sklearn.metrics module.\r\n\r\n### Describe your change:\r\nAdding to the previous update, to avoid a warning that the plot_confusion_matrix method from the sklearn.metrics throws, I have used the heatmap module of seaborn which takes an argument - the confusion matrix obtained from the confusion_matrix method in the sklearn.metrics library.\r\n\r\n\r\n* [ ] Add an algorithm?\r\n* [x] Fix a bug or typo in an existing algorithm?\r\n* [ ] Documentation change?\r\n\r\n### Checklist:\r\n* [x] I have read [CONTRIBUTING.md](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md).\r\n* [x] This pull request is all my own work -- I have not plagiarized.\r\n* [x] I know that pull requests will not be merged if they fail the automated tests.\r\n* [x] This PR only changes one algorithm file. 
To ease review, please open separate PRs for separate algorithms.\r\n* [x] All new Python files are placed inside an existing directory.\r\n* [x] All filenames are in all lowercase characters with no spaces or dashes.\r\n* [x] All functions and variable names follow Python naming conventions.\r\n* [x] All function parameters and return values are annotated with Python [type hints](https://docs.python.org/3/library/typing.html).\r\n* [x] All functions have [doctests](https://docs.python.org/3/library/doctest.html) that pass the automated testing.\r\n* [x] All new algorithms have a URL in its comments that points to Wikipedia or other similar explanation.\r\n* [x] If this pull request resolves one or more open issues then the commit message contains `Fixes: #{$ISSUE_NO}`.\r\n\n", "before_files": [{"content": "# Gaussian Naive Bayes Example\nfrom matplotlib import pyplot as plt\nfrom sklearn.datasets import load_iris\nfrom sklearn.metrics import plot_confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\n\n\ndef main():\n\n \"\"\"\n Gaussian Naive Bayes Example using sklearn function.\n Iris type dataset is used to demonstrate algorithm.\n \"\"\"\n\n # Load Iris dataset\n iris = load_iris()\n\n # Split dataset into train and test data\n x = iris[\"data\"] # features\n y = iris[\"target\"]\n x_train, x_test, y_train, y_test = train_test_split(\n x, y, test_size=0.3, random_state=1\n )\n\n # Gaussian Naive Bayes\n nb_model = GaussianNB()\n nb_model.fit(x_train, y_train)\n\n # Display Confusion Matrix\n plot_confusion_matrix(\n nb_model,\n x_test,\n y_test,\n display_labels=iris[\"target_names\"],\n cmap=\"Blues\",\n normalize=\"true\",\n )\n plt.title(\"Normalized Confusion Matrix - IRIS Dataset\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "machine_learning/gaussian_naive_bayes.py"}]} | 1,273 | 381 |
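For reference, the seaborn-based plotting that the PR description mentions would look roughly like the sketch below; this is an illustration assuming `seaborn` is installed, not the code that was merged (the golden diff above kept `plot_confusion_matrix` and added an accuracy printout instead):

```python
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix

def plot_confusion_heatmap(y_test, y_pred, labels):
    # Compute the normalized matrix once, then hand it to seaborn.
    cm = confusion_matrix(y_test, y_pred, normalize="true")
    sns.heatmap(cm, annot=True, fmt=".2f", cmap="Blues",
                xticklabels=labels, yticklabels=labels)
    plt.xlabel("Predicted label")
    plt.ylabel("True label")
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
```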
gh_patches_debug_4865 | rasdani/github-patches | git_diff | svthalia__concrexit-2218 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't revoke staff status for superusers
### Describe the bug
After #312, staff status will get revoked for superusers too. That shouldn't happen.
### How to reproduce
Have a superuser that is not in a committee.
### Expected behaviour
No staff status revoking
### Screenshots
### Additional context
</issue>
<code>
[start of website/activemembers/services.py]
1 from django.db.models import Count, Q
2 from django.utils import timezone
3
4 from activemembers.models import Committee
5 from members.models.member import Member
6
7
8 def generate_statistics() -> dict:
9 """Generate statistics about number of members in each committee."""
10 committees = Committee.active_objects.annotate(
11 member_count=(
12 Count(
13 "members",
14 filter=(
15 Q(membergroupmembership__until=None)
16 | Q(membergroupmembership__until__gte=timezone.now())
17 ),
18 )
19 )
20 )
21
22 data = {
23 "labels": [],
24 "datasets": [
25 {"data": []},
26 ],
27 }
28 for committee in committees:
29 data["labels"].append(committee.name)
30 data["datasets"][0]["data"].append(committee.member_count)
31
32 return data
33
34
35 def revoke_staff_permission_for_users_in_no_commitee():
36 members = Member.objects.filter(is_staff=True)
37 revoked = []
38 for member in members:
39 if member.get_member_groups().count() == 0:
40 revoked.append(member.id)
41 member.is_staff = False
42 member.save()
43 return Member.objects.filter(pk__in=revoked)
44
[end of website/activemembers/services.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/activemembers/services.py b/website/activemembers/services.py
--- a/website/activemembers/services.py
+++ b/website/activemembers/services.py
@@ -36,7 +36,7 @@
members = Member.objects.filter(is_staff=True)
revoked = []
for member in members:
- if member.get_member_groups().count() == 0:
+ if member.get_member_groups().count() == 0 and not member.is_superuser:
revoked.append(member.id)
member.is_staff = False
member.save()
| {"golden_diff": "diff --git a/website/activemembers/services.py b/website/activemembers/services.py\n--- a/website/activemembers/services.py\n+++ b/website/activemembers/services.py\n@@ -36,7 +36,7 @@\n members = Member.objects.filter(is_staff=True)\n revoked = []\n for member in members:\n- if member.get_member_groups().count() == 0:\n+ if member.get_member_groups().count() == 0 and not member.is_superuser:\n revoked.append(member.id)\n member.is_staff = False\n member.save()\n", "issue": "Don't revoke staff status for superusers\n### Describe the bug\r\nAfter #312, staff status will get revoked for superusers too. That shouldn't happen\r\n\r\n### How to reproduce\r\nHave a super user that is not in a committee.\r\n\r\n### Expected behaviour\r\nNo staff status revoking\r\n\r\n### Screenshots\r\n\r\n### Additional context\r\n\n", "before_files": [{"content": "from django.db.models import Count, Q\nfrom django.utils import timezone\n\nfrom activemembers.models import Committee\nfrom members.models.member import Member\n\n\ndef generate_statistics() -> dict:\n \"\"\"Generate statistics about number of members in each committee.\"\"\"\n committees = Committee.active_objects.annotate(\n member_count=(\n Count(\n \"members\",\n filter=(\n Q(membergroupmembership__until=None)\n | Q(membergroupmembership__until__gte=timezone.now())\n ),\n )\n )\n )\n\n data = {\n \"labels\": [],\n \"datasets\": [\n {\"data\": []},\n ],\n }\n for committee in committees:\n data[\"labels\"].append(committee.name)\n data[\"datasets\"][0][\"data\"].append(committee.member_count)\n\n return data\n\n\ndef revoke_staff_permission_for_users_in_no_commitee():\n members = Member.objects.filter(is_staff=True)\n revoked = []\n for member in members:\n if member.get_member_groups().count() == 0:\n revoked.append(member.id)\n member.is_staff = False\n member.save()\n return Member.objects.filter(pk__in=revoked)\n", "path": "website/activemembers/services.py"}]} | 931 | 127 |
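The one-line fix above adds a superuser guard to the revocation loop. The same logic in a self-contained form, with a hypothetical dataclass standing in for the Django `Member` model:

```python
from dataclasses import dataclass

@dataclass
class Member:
    is_staff: bool
    is_superuser: bool
    group_count: int

def revoke_staff(members):
    revoked = []
    for member in members:
        # Superusers keep staff status even when in no committee.
        if member.group_count == 0 and not member.is_superuser:
            member.is_staff = False
            revoked.append(member)
    return revoked

assert revoke_staff([Member(True, True, 0)]) == []  # superuser untouched
```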
gh_patches_debug_28541 | rasdani/github-patches | git_diff | pyca__cryptography-5517 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Revisit _extra_compile_args in build_openssl.py
Now that we're dropping 1.0.2 in #5511, it's time to review that.
</issue>
<code>
[start of src/_cffi_src/build_openssl.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import os
8 import sys
9 from distutils import dist
10 from distutils.ccompiler import get_default_compiler
11 from distutils.command.config import config
12
13 from _cffi_src.utils import (
14 build_ffi_for_binding,
15 compiler_type,
16 extra_link_args,
17 )
18
19
20 def _get_openssl_libraries(platform):
21 if os.environ.get("CRYPTOGRAPHY_SUPPRESS_LINK_FLAGS", None):
22 return []
23 # OpenSSL goes by a different library name on different operating systems.
24 if platform == "win32" and compiler_type() == "msvc":
25 return [
26 "libssl",
27 "libcrypto",
28 "advapi32",
29 "crypt32",
30 "gdi32",
31 "user32",
32 "ws2_32",
33 ]
34 else:
35 # darwin, linux, mingw all use this path
36 # In some circumstances, the order in which these libs are
37 # specified on the linker command-line is significant;
38 # libssl must come before libcrypto
39 # (https://marc.info/?l=openssl-users&m=135361825921871)
40 # -lpthread required due to usage of pthread an potential
41 # existance of a static part containing e.g. pthread_atfork
42 # (https://github.com/pyca/cryptography/issues/5084)
43 if sys.platform == "zos":
44 return ["ssl", "crypto"]
45 else:
46 return ["ssl", "crypto", "pthread"]
47
48
49 def _extra_compile_args(platform):
50 """
51 We set -Wconversion args here so that we only do Wconversion checks on the
52 code we're compiling and not on cffi itself (as passing -Wconversion in
53 CFLAGS would do). We set no error on sign conversion because some
54 function signatures in OpenSSL have changed from long -> unsigned long
55 in the past. Since that isn't a precision issue we don't care.
56 When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 we can
57 revisit this.
58 """
59 # make sure the compiler used supports the flags to be added
60 is_gcc = False
61 if get_default_compiler() == "unix":
62 d = dist.Distribution()
63 cmd = config(d)
64 cmd._check_compiler()
65 is_gcc = (
66 "gcc" in cmd.compiler.compiler[0]
67 or "clang" in cmd.compiler.compiler[0]
68 )
69 if is_gcc or not (
70 platform in ["win32", "hp-ux11", "sunos5"]
71 or platform.startswith("aix")
72 ):
73 return ["-Wconversion", "-Wno-error=sign-conversion"]
74 else:
75 return []
76
77
78 ffi = build_ffi_for_binding(
79 module_name="_openssl",
80 module_prefix="_cffi_src.openssl.",
81 modules=[
82 # This goes first so we can define some cryptography-wide symbols.
83 "cryptography",
84 "aes",
85 "asn1",
86 "bignum",
87 "bio",
88 "cmac",
89 "conf",
90 "crypto",
91 "ct",
92 "dh",
93 "dsa",
94 "ec",
95 "ecdh",
96 "ecdsa",
97 "engine",
98 "err",
99 "evp",
100 "fips",
101 "hmac",
102 "nid",
103 "objects",
104 "ocsp",
105 "opensslv",
106 "osrandom_engine",
107 "pem",
108 "pkcs12",
109 "rand",
110 "rsa",
111 "ssl",
112 "x509",
113 "x509name",
114 "x509v3",
115 "x509_vfy",
116 "pkcs7",
117 "callbacks",
118 ],
119 libraries=_get_openssl_libraries(sys.platform),
120 # These args are passed here so that we only do Wconversion checks on the
121 # code we're compiling and not on cffi itself (as passing -Wconversion in
122 # CFLAGS would do). We set no error on sign convesrion because some
123 # function signatures in OpenSSL have changed from long -> unsigned long
124 # in the past. Since that isn't a precision issue we don't care.
125 # When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 we can
126 # revisit this.
127 extra_compile_args=_extra_compile_args(sys.platform),
128 extra_link_args=extra_link_args(compiler_type()),
129 )
130
[end of src/_cffi_src/build_openssl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/_cffi_src/build_openssl.py b/src/_cffi_src/build_openssl.py
--- a/src/_cffi_src/build_openssl.py
+++ b/src/_cffi_src/build_openssl.py
@@ -51,10 +51,9 @@
We set -Wconversion args here so that we only do Wconversion checks on the
code we're compiling and not on cffi itself (as passing -Wconversion in
CFLAGS would do). We set no error on sign conversion because some
- function signatures in OpenSSL have changed from long -> unsigned long
- in the past. Since that isn't a precision issue we don't care.
- When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 we can
- revisit this.
+ function signatures in LibreSSL differ from OpenSSL have changed on long
+ vs. unsigned long in the past. Since that isn't a precision issue we don't
+ care.
"""
# make sure the compiler used supports the flags to be added
is_gcc = False
@@ -117,13 +116,6 @@
"callbacks",
],
libraries=_get_openssl_libraries(sys.platform),
- # These args are passed here so that we only do Wconversion checks on the
- # code we're compiling and not on cffi itself (as passing -Wconversion in
- # CFLAGS would do). We set no error on sign convesrion because some
- # function signatures in OpenSSL have changed from long -> unsigned long
- # in the past. Since that isn't a precision issue we don't care.
- # When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 we can
- # revisit this.
extra_compile_args=_extra_compile_args(sys.platform),
extra_link_args=extra_link_args(compiler_type()),
)
| {"golden_diff": "diff --git a/src/_cffi_src/build_openssl.py b/src/_cffi_src/build_openssl.py\n--- a/src/_cffi_src/build_openssl.py\n+++ b/src/_cffi_src/build_openssl.py\n@@ -51,10 +51,9 @@\n We set -Wconversion args here so that we only do Wconversion checks on the\n code we're compiling and not on cffi itself (as passing -Wconversion in\n CFLAGS would do). We set no error on sign conversion because some\n- function signatures in OpenSSL have changed from long -> unsigned long\n- in the past. Since that isn't a precision issue we don't care.\n- When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 we can\n- revisit this.\n+ function signatures in LibreSSL differ from OpenSSL have changed on long\n+ vs. unsigned long in the past. Since that isn't a precision issue we don't\n+ care.\n \"\"\"\n # make sure the compiler used supports the flags to be added\n is_gcc = False\n@@ -117,13 +116,6 @@\n \"callbacks\",\n ],\n libraries=_get_openssl_libraries(sys.platform),\n- # These args are passed here so that we only do Wconversion checks on the\n- # code we're compiling and not on cffi itself (as passing -Wconversion in\n- # CFLAGS would do). We set no error on sign convesrion because some\n- # function signatures in OpenSSL have changed from long -> unsigned long\n- # in the past. Since that isn't a precision issue we don't care.\n- # When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 we can\n- # revisit this.\n extra_compile_args=_extra_compile_args(sys.platform),\n extra_link_args=extra_link_args(compiler_type()),\n )\n", "issue": "Revisit _extra_compile_args in build_openssl.py\nNow that we're dropping 1.0.2 in #5511, it's time to review that.\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom distutils import dist\nfrom distutils.ccompiler import get_default_compiler\nfrom distutils.command.config import config\n\nfrom _cffi_src.utils import (\n build_ffi_for_binding,\n compiler_type,\n extra_link_args,\n)\n\n\ndef _get_openssl_libraries(platform):\n if os.environ.get(\"CRYPTOGRAPHY_SUPPRESS_LINK_FLAGS\", None):\n return []\n # OpenSSL goes by a different library name on different operating systems.\n if platform == \"win32\" and compiler_type() == \"msvc\":\n return [\n \"libssl\",\n \"libcrypto\",\n \"advapi32\",\n \"crypt32\",\n \"gdi32\",\n \"user32\",\n \"ws2_32\",\n ]\n else:\n # darwin, linux, mingw all use this path\n # In some circumstances, the order in which these libs are\n # specified on the linker command-line is significant;\n # libssl must come before libcrypto\n # (https://marc.info/?l=openssl-users&m=135361825921871)\n # -lpthread required due to usage of pthread an potential\n # existance of a static part containing e.g. pthread_atfork\n # (https://github.com/pyca/cryptography/issues/5084)\n if sys.platform == \"zos\":\n return [\"ssl\", \"crypto\"]\n else:\n return [\"ssl\", \"crypto\", \"pthread\"]\n\n\ndef _extra_compile_args(platform):\n \"\"\"\n We set -Wconversion args here so that we only do Wconversion checks on the\n code we're compiling and not on cffi itself (as passing -Wconversion in\n CFLAGS would do). We set no error on sign conversion because some\n function signatures in OpenSSL have changed from long -> unsigned long\n in the past. 
Since that isn't a precision issue we don't care.\n When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 we can\n revisit this.\n \"\"\"\n # make sure the compiler used supports the flags to be added\n is_gcc = False\n if get_default_compiler() == \"unix\":\n d = dist.Distribution()\n cmd = config(d)\n cmd._check_compiler()\n is_gcc = (\n \"gcc\" in cmd.compiler.compiler[0]\n or \"clang\" in cmd.compiler.compiler[0]\n )\n if is_gcc or not (\n platform in [\"win32\", \"hp-ux11\", \"sunos5\"]\n or platform.startswith(\"aix\")\n ):\n return [\"-Wconversion\", \"-Wno-error=sign-conversion\"]\n else:\n return []\n\n\nffi = build_ffi_for_binding(\n module_name=\"_openssl\",\n module_prefix=\"_cffi_src.openssl.\",\n modules=[\n # This goes first so we can define some cryptography-wide symbols.\n \"cryptography\",\n \"aes\",\n \"asn1\",\n \"bignum\",\n \"bio\",\n \"cmac\",\n \"conf\",\n \"crypto\",\n \"ct\",\n \"dh\",\n \"dsa\",\n \"ec\",\n \"ecdh\",\n \"ecdsa\",\n \"engine\",\n \"err\",\n \"evp\",\n \"fips\",\n \"hmac\",\n \"nid\",\n \"objects\",\n \"ocsp\",\n \"opensslv\",\n \"osrandom_engine\",\n \"pem\",\n \"pkcs12\",\n \"rand\",\n \"rsa\",\n \"ssl\",\n \"x509\",\n \"x509name\",\n \"x509v3\",\n \"x509_vfy\",\n \"pkcs7\",\n \"callbacks\",\n ],\n libraries=_get_openssl_libraries(sys.platform),\n # These args are passed here so that we only do Wconversion checks on the\n # code we're compiling and not on cffi itself (as passing -Wconversion in\n # CFLAGS would do). We set no error on sign convesrion because some\n # function signatures in OpenSSL have changed from long -> unsigned long\n # in the past. Since that isn't a precision issue we don't care.\n # When we drop support for CRYPTOGRAPHY_OPENSSL_LESS_THAN_110 we can\n # revisit this.\n extra_compile_args=_extra_compile_args(sys.platform),\n extra_link_args=extra_link_args(compiler_type()),\n)\n", "path": "src/_cffi_src/build_openssl.py"}]} | 1,890 | 423 |
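The compile-flag behavior itself is unchanged by the diff above: `-Wconversion -Wno-error=sign-conversion` is still added for GCC/Clang-like toolchains. A condensed sketch of that decision, assuming an `is_gcc_like` probe result has already been computed:

```python
def warning_flags(platform, is_gcc_like):
    # MSVC, AIX, HP-UX and Solaris compilers don't accept these
    # GCC-style flags unless the compiler is actually gcc/clang.
    strict_platform = (
        platform in ("win32", "hp-ux11", "sunos5")
        or platform.startswith("aix")
    )
    if is_gcc_like or not strict_platform:
        return ["-Wconversion", "-Wno-error=sign-conversion"]
    return []
```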
gh_patches_debug_2408 | rasdani/github-patches | git_diff | tornadoweb__tornado-3167 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tornado 6.2 release readiness
I'm creating this issue to collect feedback on the 6.2 betas. For the folks who have tried them, do you think the release is ready to go or are there still more changes to be made?
Tagging @minrk and @graingert as authors of relevant PRs, although I'd welcome feedback from anyone interested in this release.
</issue>
<code>
[start of tornado/__init__.py]
1 #
2 # Copyright 2009 Facebook
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 # not use this file except in compliance with the License. You may obtain
6 # a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 # License for the specific language governing permissions and limitations
14 # under the License.
15
16 """The Tornado web server and tools."""
17
18 # version is a human-readable version number.
19
20 # version_info is a four-tuple for programmatic comparison. The first
21 # three numbers are the components of the version number. The fourth
22 # is zero for an official release, positive for a development branch,
23 # or negative for a release candidate or beta (after the base version
24 # number has been incremented)
25 version = "6.2b2"
26 version_info = (6, 2, 0, -98)
27
[end of tornado/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/tornado/__init__.py b/tornado/__init__.py
--- a/tornado/__init__.py
+++ b/tornado/__init__.py
@@ -22,5 +22,5 @@
# is zero for an official release, positive for a development branch,
# or negative for a release candidate or beta (after the base version
# number has been incremented)
-version = "6.2b2"
-version_info = (6, 2, 0, -98)
+version = "6.2"
+version_info = (6, 2, 0, 0)
| {"golden_diff": "diff --git a/tornado/__init__.py b/tornado/__init__.py\n--- a/tornado/__init__.py\n+++ b/tornado/__init__.py\n@@ -22,5 +22,5 @@\n # is zero for an official release, positive for a development branch,\n # or negative for a release candidate or beta (after the base version\n # number has been incremented)\n-version = \"6.2b2\"\n-version_info = (6, 2, 0, -98)\n+version = \"6.2\"\n+version_info = (6, 2, 0, 0)\n", "issue": "Tornado 6.2 release readiness\nI'm creating this issue to collect feedback on the 6.2 betas. For the folks who have tried them, do you think the release is ready to go or are there still more changes to be made?\r\n\r\nTagging @minrk and @graingert as authors of relevant PRs, although I'd welcome feedback from anyone interested in this release. \n", "before_files": [{"content": "#\n# Copyright 2009 Facebook\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"The Tornado web server and tools.\"\"\"\n\n# version is a human-readable version number.\n\n# version_info is a four-tuple for programmatic comparison. The first\n# three numbers are the components of the version number. The fourth\n# is zero for an official release, positive for a development branch,\n# or negative for a release candidate or beta (after the base version\n# number has been incremented)\nversion = \"6.2b2\"\nversion_info = (6, 2, 0, -98)\n", "path": "tornado/__init__.py"}]} | 911 | 133 |
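`version_info` is the field downstream code should compare, since the fourth element encodes release status (zero for a final release, negative for betas and RCs). A quick illustration using the exact tuples from the diff above:

```python
beta = (6, 2, 0, -98)   # 6.2b2
final = (6, 2, 0, 0)    # 6.2

# Tuple comparison orders the beta before the final release, so a
# plain >= check gates on "6.2 final or later".
assert beta < final
assert final >= (6, 2)
```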
gh_patches_debug_37518 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-475 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
data.umweltprofis.at: iCal service retired
I had to change the interval in my data.umweltprofis.at waste schedule when I noticed that the server throws an error when requesting a new iCal link at https://data.umweltprofis.at/opendata/AppointmentService/index.aspx
I contacted their support and they told me that this service is not going to be fixed because they are currently working on (another?) app.
Should this integration be marked as broken for now? Existing iCal URLs continue to work, at least for the time being.
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/data_umweltprofis_at.py]
1 import logging
2 import requests
3 from waste_collection_schedule import Collection # type: ignore[attr-defined]
4 from waste_collection_schedule.service.ICS import ICS
5
6 TITLE = "UMWELTPROFIS"
7 DESCRIPTION = "Source for Umweltprofis"
8 URL = "https://www.umweltprofis.at"
9 TEST_CASES = {
10 "Ebensee": {"url": "https://data.umweltprofis.at/OpenData/AppointmentService/AppointmentService.asmx/GetIcalWastePickupCalendar?key=KXX_K0bIXDdk0NrTkk3xWqLM9-bsNgIVBE6FMXDObTqxmp9S39nIqwhf9LTIAX9shrlpfCYU7TG_8pS9NjkAJnM_ruQ1SYm3V9YXVRfLRws1"},
11 }
12
13 _LOGGER = logging.getLogger(__name__)
14
15
16 class Source:
17 def __init__(self, url):
18 self._url = url
19 self._ics = ICS()
20
21 def fetch(self):
22 r = requests.get(self._url)
23 if r.status_code != 200:
24 _LOGGER.error("Error querying calendar data")
25 return []
26
27 fixed_text = r.text.replace("REFRESH - INTERVAL; VALUE = ", "REFRESH-INTERVAL;VALUE=")
28
29 dates = self._ics.convert(fixed_text)
30
31 entries = []
32 for d in dates:
33 entries.append(Collection(d[0], d[1]))
34 return entries
35
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/data_umweltprofis_at.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/data_umweltprofis_at.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/data_umweltprofis_at.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/data_umweltprofis_at.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/data_umweltprofis_at.py
@@ -1,5 +1,7 @@
import logging
import requests
+from datetime import datetime
+from xml.dom.minidom import parseString
from waste_collection_schedule import Collection # type: ignore[attr-defined]
from waste_collection_schedule.service.ICS import ICS
@@ -8,17 +10,33 @@
URL = "https://www.umweltprofis.at"
TEST_CASES = {
"Ebensee": {"url": "https://data.umweltprofis.at/OpenData/AppointmentService/AppointmentService.asmx/GetIcalWastePickupCalendar?key=KXX_K0bIXDdk0NrTkk3xWqLM9-bsNgIVBE6FMXDObTqxmp9S39nIqwhf9LTIAX9shrlpfCYU7TG_8pS9NjkAJnM_ruQ1SYm3V9YXVRfLRws1"},
+ "Rohrbach": {"xmlurl": "https://data.umweltprofis.at/opendata/AppointmentService/AppointmentService.asmx/GetTermineForLocationSecured?Key=TEMPKeyabvvMKVCic0cMcmsTEMPKey&StreetNr=118213&HouseNr=Alle&intervall=Alle"},
}
_LOGGER = logging.getLogger(__name__)
+def getText(element):
+ s = ""
+ for e in element.childNodes:
+ if e.nodeType == e.TEXT_NODE:
+ s += e.nodeValue
+ return s
class Source:
- def __init__(self, url):
+ def __init__(self, url=None, xmlurl=None):
self._url = url
+ self._xmlurl = xmlurl
self._ics = ICS()
+ if url is None and xmlurl is None:
+ raise Exception("either url or xmlurl needs to be specified")
def fetch(self):
+ if self._url is not None:
+ return self.fetch_ics()
+ elif self._xmlurl is not None:
+ return self.fetch_xml()
+
+ def fetch_ics(self):
r = requests.get(self._url)
if r.status_code != 200:
_LOGGER.error("Error querying calendar data")
@@ -32,3 +50,18 @@
for d in dates:
entries.append(Collection(d[0], d[1]))
return entries
+
+ def fetch_xml(self):
+ r = requests.get(self._xmlurl)
+ r.raise_for_status()
+
+ doc = parseString(r.text)
+ appointments = doc.getElementsByTagName("AppointmentEntry")
+
+ entries = []
+ for a in appointments:
+ date_string = getText(a.getElementsByTagName("Datum")[0])
+ date = datetime.fromisoformat(date_string).date()
+ waste_type = getText(a.getElementsByTagName("WasteType")[0])
+ entries.append(Collection(date, waste_type))
+ return entries
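As a minimal usage sketch of the patched source (assumptions: the import path is inferred from the file layout above, the `xmlurl` is the Rohrbach endpoint from the patch's TEST_CASES, and the `Collection` attribute names are assumed):

# Minimal sketch -- assumptions: module import path, Collection attributes.
from waste_collection_schedule.source.data_umweltprofis_at import Source

source = Source(
    xmlurl="https://data.umweltprofis.at/opendata/AppointmentService/AppointmentService.asmx/GetTermineForLocationSecured?Key=TEMPKeyabvvMKVCic0cMcmsTEMPKey&StreetNr=118213&HouseNr=Alle&intervall=Alle"
)
# fetch() dispatches to fetch_xml() because only xmlurl was given.
for collection in source.fetch():
    print(collection.date, collection.type)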
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/data_umweltprofis_at.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/data_umweltprofis_at.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/data_umweltprofis_at.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/data_umweltprofis_at.py\n@@ -1,5 +1,7 @@\n import logging\n import requests\n+from datetime import datetime\n+from xml.dom.minidom import parseString\n from waste_collection_schedule import Collection # type: ignore[attr-defined]\n from waste_collection_schedule.service.ICS import ICS\n \n@@ -8,17 +10,33 @@\n URL = \"https://www.umweltprofis.at\"\n TEST_CASES = {\n \"Ebensee\": {\"url\": \"https://data.umweltprofis.at/OpenData/AppointmentService/AppointmentService.asmx/GetIcalWastePickupCalendar?key=KXX_K0bIXDdk0NrTkk3xWqLM9-bsNgIVBE6FMXDObTqxmp9S39nIqwhf9LTIAX9shrlpfCYU7TG_8pS9NjkAJnM_ruQ1SYm3V9YXVRfLRws1\"},\n+ \"Rohrbach\": {\"xmlurl\": \"https://data.umweltprofis.at/opendata/AppointmentService/AppointmentService.asmx/GetTermineForLocationSecured?Key=TEMPKeyabvvMKVCic0cMcmsTEMPKey&StreetNr=118213&HouseNr=Alle&intervall=Alle\"},\n }\n \n _LOGGER = logging.getLogger(__name__)\n \n+def getText(element):\n+ s = \"\"\n+ for e in element.childNodes:\n+ if e.nodeType == e.TEXT_NODE:\n+ s += e.nodeValue\n+ return s\n \n class Source:\n- def __init__(self, url):\n+ def __init__(self, url=None, xmlurl=None):\n self._url = url\n+ self._xmlurl = xmlurl\n self._ics = ICS()\n+ if url is None and xmlurl is None:\n+ raise Exception(\"either url or xmlurl needs to be specified\")\n \n def fetch(self):\n+ if self._url is not None:\n+ return self.fetch_ics()\n+ elif self._xmlurl is not None:\n+ return self.fetch_xml()\n+\n+ def fetch_ics(self):\n r = requests.get(self._url)\n if r.status_code != 200:\n _LOGGER.error(\"Error querying calendar data\")\n@@ -32,3 +50,18 @@\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n+\n+ def fetch_xml(self):\n+ r = requests.get(self._xmlurl)\n+ r.raise_for_status()\n+\n+ doc = parseString(r.text)\n+ appointments = doc.getElementsByTagName(\"AppointmentEntry\")\n+\n+ entries = []\n+ for a in appointments:\n+ date_string = getText(a.getElementsByTagName(\"Datum\")[0])\n+ date = datetime.fromisoformat(date_string).date()\n+ waste_type = getText(a.getElementsByTagName(\"WasteType\")[0])\n+ entries.append(Collection(date, waste_type))\n+ return entries\n", "issue": "data.umweltprofis.at: iCal service retired\nI had to change the interval in my data.umweltprofis.at waste schedule when I noticed that the server throws an error when requesting a new iCal link at https://data.umweltprofis.at/opendata/AppointmentService/index.aspx\r\n\r\nI contacted their support and they told me that this service is not going to be fixed because they are currently working on (another?) app.\r\n\r\nShould this integration be marked as broken for the time being? 
Existing iCal URLs continue to work, at least for the time being.\n", "before_files": [{"content": "import logging\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"UMWELTPROFIS\"\nDESCRIPTION = \"Source for Umweltprofis\"\nURL = \"https://www.umweltprofis.at\"\nTEST_CASES = {\n \"Ebensee\": {\"url\": \"https://data.umweltprofis.at/OpenData/AppointmentService/AppointmentService.asmx/GetIcalWastePickupCalendar?key=KXX_K0bIXDdk0NrTkk3xWqLM9-bsNgIVBE6FMXDObTqxmp9S39nIqwhf9LTIAX9shrlpfCYU7TG_8pS9NjkAJnM_ruQ1SYm3V9YXVRfLRws1\"},\n}\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, url):\n self._url = url\n self._ics = ICS()\n\n def fetch(self):\n r = requests.get(self._url)\n if r.status_code != 200:\n _LOGGER.error(\"Error querying calendar data\")\n return []\n\n fixed_text = r.text.replace(\"REFRESH - INTERVAL; VALUE = \", \"REFRESH-INTERVAL;VALUE=\")\n\n dates = self._ics.convert(fixed_text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/data_umweltprofis_at.py"}]} | 1,077 | 736 |
gh_patches_debug_13843 | rasdani/github-patches | git_diff | rucio__rucio-1799 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
REST call for requests is broken by '/' in DIDs
Motivation
----------
As discussed in #1786, the REST call in https://github.com/rucio/rucio/blob/master/lib/rucio/web/rest/webpy/v1/request.py breaks for DIDs containing `/`.
Modification
------------
I think that rather than doing this (*), we can pass all the needed information (scope, name, rse) as parameters instead of including them in the URL path.
(*) https://github.com/rucio/rucio/blob/master/lib/rucio/web/rest/webpy/v1/request.py#L38
</issue>
<code>
[start of lib/rucio/web/rest/webpy/v1/request.py]
1 #!/usr/bin/env python
2 # Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 #
16 # Authors:
17 # - Mario Lassnig <[email protected]>, 2014-2018
18 # - Vincent Garonne <[email protected]>, 2017
19 #
20 # PY3K COMPATIBLE
21
22 import json
23
24 from logging import getLogger, StreamHandler, DEBUG
25
26 from web import application, ctx, loadhook, header
27
28 from rucio.api import request
29 from rucio.common.utils import generate_http_error, APIEncoder
30 from rucio.web.rest.common import rucio_loadhook, RucioController, exception_wrapper
31
32
33 LOGGER = getLogger("rucio.request")
34 SH = StreamHandler()
35 SH.setLevel(DEBUG)
36 LOGGER.addHandler(SH)
37
38 URLS = ('/(.+)/(.+)/(.+)', 'RequestGet',)
39
40
41 class RequestGet(RucioController):
42 """ REST API to get requests. """
43
44 @exception_wrapper
45 def GET(self, scope, name, rse):
46 """
47 List request for given DID to a destination RSE.
48
49 HTTP Success:
50 200 OK
51
52 HTTP Error:
53 401 Unauthorized
54 404 Request Not Found
55 """
56
57 header('Content-Type', 'application/json')
58
59 try:
60 return json.dumps(request.get_request_by_did(scope=scope,
61 name=name,
62 rse=rse,
63 issuer=ctx.env.get('issuer')),
64 cls=APIEncoder)
65 except:
66 raise generate_http_error(404, 'RequestNotFound', 'No request found for DID %s:%s at RSE %s' % (scope,
67 name,
68 rse))
69
70
71 """----------------------
72 Web service startup
73 ----------------------"""
74
75 APP = application(URLS, globals())
76 APP.add_processor(loadhook(rucio_loadhook))
77 application = APP.wsgifunc()
78
[end of lib/rucio/web/rest/webpy/v1/request.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/lib/rucio/web/rest/webpy/v1/request.py b/lib/rucio/web/rest/webpy/v1/request.py
--- a/lib/rucio/web/rest/webpy/v1/request.py
+++ b/lib/rucio/web/rest/webpy/v1/request.py
@@ -26,6 +26,7 @@
from web import application, ctx, loadhook, header
from rucio.api import request
+from rucio.common.schema import SCOPE_NAME_REGEXP
from rucio.common.utils import generate_http_error, APIEncoder
from rucio.web.rest.common import rucio_loadhook, RucioController, exception_wrapper
@@ -35,7 +36,7 @@
SH.setLevel(DEBUG)
LOGGER.addHandler(SH)
-URLS = ('/(.+)/(.+)/(.+)', 'RequestGet',)
+URLS = ('%s/(.+)' % SCOPE_NAME_REGEXP, 'RequestGet',)
class RequestGet(RucioController):
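To make the failure mode concrete, here is a small stand-alone illustration (not rucio code; the DID is invented) of why the old `/(.+)/(.+)/(.+)` route mis-parses DIDs whose names contain `/`:

import re

# Greedy matching swallows part of the DID name into the scope group.
old_route = re.compile(r'^/(.+)/(.+)/(.+)$')
scope, name, rse = old_route.match('/user.jdoe/path/to/file/MOCK_RSE').groups()
print(scope, name, rse)
# -> user.jdoe/path/to file MOCK_RSE  (scope and name are both wrong)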
| {"golden_diff": "diff --git a/lib/rucio/web/rest/webpy/v1/request.py b/lib/rucio/web/rest/webpy/v1/request.py\n--- a/lib/rucio/web/rest/webpy/v1/request.py\n+++ b/lib/rucio/web/rest/webpy/v1/request.py\n@@ -26,6 +26,7 @@\n from web import application, ctx, loadhook, header\n \n from rucio.api import request\n+from rucio.common.schema import SCOPE_NAME_REGEXP\n from rucio.common.utils import generate_http_error, APIEncoder\n from rucio.web.rest.common import rucio_loadhook, RucioController, exception_wrapper\n \n@@ -35,7 +36,7 @@\n SH.setLevel(DEBUG)\n LOGGER.addHandler(SH)\n \n-URLS = ('/(.+)/(.+)/(.+)', 'RequestGet',)\n+URLS = ('%s/(.+)' % SCOPE_NAME_REGEXP, 'RequestGet',)\n \n \n class RequestGet(RucioController):\n", "issue": "REST call for requests is broken by '/' in DIDs\nMotivation\r\n----------\r\nAs discussed in #1786 , the REST call of https://github.com/rucio/rucio/blob/master/lib/rucio/web/rest/webpy/v1/request.py get broken for DIDs containing `/`\r\n\r\nModification\r\n------------\r\nI think that rather than doing this (*) we can pass all the needed information (scope, name, rse) as params instead of including them in the url path. \r\n\r\n(*) https://github.com/rucio/rucio/blob/master/lib/rucio/web/rest/webpy/v1/request.py#L38\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright 2012-2018 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Mario Lassnig <[email protected]>, 2014-2018\n# - Vincent Garonne <[email protected]>, 2017\n#\n# PY3K COMPATIBLE\n\nimport json\n\nfrom logging import getLogger, StreamHandler, DEBUG\n\nfrom web import application, ctx, loadhook, header\n\nfrom rucio.api import request\nfrom rucio.common.utils import generate_http_error, APIEncoder\nfrom rucio.web.rest.common import rucio_loadhook, RucioController, exception_wrapper\n\n\nLOGGER = getLogger(\"rucio.request\")\nSH = StreamHandler()\nSH.setLevel(DEBUG)\nLOGGER.addHandler(SH)\n\nURLS = ('/(.+)/(.+)/(.+)', 'RequestGet',)\n\n\nclass RequestGet(RucioController):\n \"\"\" REST API to get requests. \"\"\"\n\n @exception_wrapper\n def GET(self, scope, name, rse):\n \"\"\"\n List request for given DID to a destination RSE.\n\n HTTP Success:\n 200 OK\n\n HTTP Error:\n 401 Unauthorized\n 404 Request Not Found\n \"\"\"\n\n header('Content-Type', 'application/json')\n\n try:\n return json.dumps(request.get_request_by_did(scope=scope,\n name=name,\n rse=rse,\n issuer=ctx.env.get('issuer')),\n cls=APIEncoder)\n except:\n raise generate_http_error(404, 'RequestNotFound', 'No request found for DID %s:%s at RSE %s' % (scope,\n name,\n rse))\n\n\n\"\"\"----------------------\n Web service startup\n----------------------\"\"\"\n\nAPP = application(URLS, globals())\nAPP.add_processor(loadhook(rucio_loadhook))\napplication = APP.wsgifunc()\n", "path": "lib/rucio/web/rest/webpy/v1/request.py"}]} | 1,382 | 212 |
gh_patches_debug_23537 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-6652 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
GCP: Dataflow deprecated view and now doesn't return complete information
It looks like GCP deprecated the Dataflow list API's ability to return full job information; the list call now only returns the `JOB_VIEW_SUMMARY` view.
https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs/list
In order to get the required information, a `get` call on every resource will be required. Should this be done at the resource level as an augmentation, or in an additional filter, since the jobs could be filtered on state in a value filter before the extra API calls are made?
</issue>
<code>
[start of tools/c7n_gcp/c7n_gcp/resources/dataflow.py]
1 # Copyright The Cloud Custodian Authors.
2 # SPDX-License-Identifier: Apache-2.0
3 import jmespath
4
5 from c7n_gcp.provider import resources
6 from c7n_gcp.query import QueryResourceManager, TypeInfo
7
8
9 @resources.register('dataflow-job')
10 class DataflowJob(QueryResourceManager):
11 """GCP resource: https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs
12 """
13
14 class resource_type(TypeInfo):
15 service = 'dataflow'
16 version = 'v1b3'
17 component = 'projects.jobs'
18 enum_spec = ('aggregated', 'jobs[]', None)
19 scope_key = 'projectId'
20 name = id = 'name'
21 get_requires_event = True
22 default_report_fields = [
23 'name', 'currentState', 'createTime', 'location']
24 permissions = ('dataflow.jobs.list',)
25
26 @staticmethod
27 def get(client, event):
28 return client.execute_command(
29 'get', {
30 'projectId': jmespath.search('resource.labels.project_id', event),
31 'jobId': jmespath.search('protoPayload.request.job_id', event)
32 }
33 )
34
[end of tools/c7n_gcp/c7n_gcp/resources/dataflow.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/tools/c7n_gcp/c7n_gcp/resources/dataflow.py b/tools/c7n_gcp/c7n_gcp/resources/dataflow.py
--- a/tools/c7n_gcp/c7n_gcp/resources/dataflow.py
+++ b/tools/c7n_gcp/c7n_gcp/resources/dataflow.py
@@ -1,6 +1,7 @@
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import jmespath
+from googleapiclient.errors import HttpError
from c7n_gcp.provider import resources
from c7n_gcp.query import QueryResourceManager, TypeInfo
@@ -31,3 +32,27 @@
'jobId': jmespath.search('protoPayload.request.job_id', event)
}
)
+
+ def resources(self, query=None):
+ query_filter = 'ACTIVE'
+ if self.data.get('query'):
+ query_filter = self.data['query'][0].get('filter', 'ACTIVE')
+
+ return super(DataflowJob, self).resources(query={'filter': query_filter})
+
+ def augment(self, resources):
+ client = self.get_client()
+ results = []
+ for r in resources:
+ ref = {
+ 'jobId': r['id'],
+ 'projectId': r['projectId'],
+ 'view': 'JOB_VIEW_ALL'
+ }
+ try:
+ results.append(
+ client.execute_query(
+ 'get', verb_arguments=ref))
+ except HttpError:
+ results.append(r)
+ return results
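For reference, the per-job lookup that the patched augment() performs can be reproduced directly with the Google API client. This is a sketch assuming application-default credentials are available; the project and job IDs are placeholders:

from googleapiclient.discovery import build

dataflow = build('dataflow', 'v1b3')
job = dataflow.projects().jobs().get(
    projectId='my-project',                     # placeholder
    jobId='2021-01-01_00_00_00-1234567890123',  # placeholder
    view='JOB_VIEW_ALL',  # the list call only returns JOB_VIEW_SUMMARY fields
).execute()
print(job.get('currentState'), job.get('environment', {}).get('serviceAccountEmail'))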
| {"golden_diff": "diff --git a/tools/c7n_gcp/c7n_gcp/resources/dataflow.py b/tools/c7n_gcp/c7n_gcp/resources/dataflow.py\n--- a/tools/c7n_gcp/c7n_gcp/resources/dataflow.py\n+++ b/tools/c7n_gcp/c7n_gcp/resources/dataflow.py\n@@ -1,6 +1,7 @@\n # Copyright The Cloud Custodian Authors.\n # SPDX-License-Identifier: Apache-2.0\n import jmespath\n+from googleapiclient.errors import HttpError\n \n from c7n_gcp.provider import resources\n from c7n_gcp.query import QueryResourceManager, TypeInfo\n@@ -31,3 +32,27 @@\n 'jobId': jmespath.search('protoPayload.request.job_id', event)\n }\n )\n+\n+ def resources(self, query=None):\n+ query_filter = 'ACTIVE'\n+ if self.data.get('query'):\n+ query_filter = self.data['query'][0].get('filter', 'ACTIVE')\n+\n+ return super(DataflowJob, self).resources(query={'filter': query_filter})\n+\n+ def augment(self, resources):\n+ client = self.get_client()\n+ results = []\n+ for r in resources:\n+ ref = {\n+ 'jobId': r['id'],\n+ 'projectId': r['projectId'],\n+ 'view': 'JOB_VIEW_ALL'\n+ }\n+ try:\n+ results.append(\n+ client.execute_query(\n+ 'get', verb_arguments=ref))\n+ except HttpError:\n+ results.append(r)\n+ return results\n", "issue": "GCP- dataflow deprecated view and now doesn't return complete information\nLooks like gcp deprecated the dataflow list ability to get all dataflow job information. It only returns `JOB_VIEW_SUMMARY `\r\n\r\nhttps://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs/list\r\n\r\nIn order to get the required information a get call on every resource will be required. Should this be done on the resource level as an augmentation or in an additional filter since the jobs could be filtered on state in a value filter before the extra api calls. \n", "before_files": [{"content": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\nimport jmespath\n\nfrom c7n_gcp.provider import resources\nfrom c7n_gcp.query import QueryResourceManager, TypeInfo\n\n\[email protected]('dataflow-job')\nclass DataflowJob(QueryResourceManager):\n \"\"\"GCP resource: https://cloud.google.com/dataflow/docs/reference/rest/v1b3/projects.jobs\n \"\"\"\n\n class resource_type(TypeInfo):\n service = 'dataflow'\n version = 'v1b3'\n component = 'projects.jobs'\n enum_spec = ('aggregated', 'jobs[]', None)\n scope_key = 'projectId'\n name = id = 'name'\n get_requires_event = True\n default_report_fields = [\n 'name', 'currentState', 'createTime', 'location']\n permissions = ('dataflow.jobs.list',)\n\n @staticmethod\n def get(client, event):\n return client.execute_command(\n 'get', {\n 'projectId': jmespath.search('resource.labels.project_id', event),\n 'jobId': jmespath.search('protoPayload.request.job_id', event)\n }\n )\n", "path": "tools/c7n_gcp/c7n_gcp/resources/dataflow.py"}]} | 974 | 349 |
gh_patches_debug_1601 | rasdani/github-patches | git_diff | mkdocs__mkdocs-1940 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Jinja2 2.10 security vulnerability reported by GitHub
Hi
I just got an alert on my GitHub repo (where we use MkDocs to build our docs) for Jinja2 2.10:
- https://github.com/eclipse/openj9-docs/network/alert/buildenv/requirements.txt/Jinja2/open
The recommendation is to move to 2.10.1.
Are there any known issues with moving to this level? Are there any plans to update the MkDocs dependency list to this level?
Thanks!
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup
4 import re
5 import os
6 import sys
7
8
9 long_description = (
10 "MkDocs is a fast, simple and downright gorgeous static site generator "
11 "that's geared towards building project documentation. Documentation "
12 "source files are written in Markdown, and configured with a single YAML "
13 "configuration file."
14 )
15
16
17 def get_version(package):
18 """Return package version as listed in `__version__` in `init.py`."""
19 init_py = open(os.path.join(package, '__init__.py')).read()
20 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
21
22
23 def get_packages(package):
24 """Return root package and all sub-packages."""
25 return [dirpath
26 for dirpath, dirnames, filenames in os.walk(package)
27 if os.path.exists(os.path.join(dirpath, '__init__.py'))]
28
29
30 if sys.argv[-1] == 'publish':
31 if os.system("pip freeze | grep wheel"):
32 print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
33 sys.exit()
34 if os.system("pip freeze | grep twine"):
35 print("twine not installed.\nUse `pip install twine`.\nExiting.")
36 sys.exit()
37 os.system("python setup.py sdist bdist_wheel")
38 os.system("twine upload dist/*")
39 print("You probably want to also tag the version now:")
40 print(" git tag -a {0} -m 'version {0}'".format(get_version("mkdocs")))
41 print(" git push --tags")
42 sys.exit()
43
44
45 setup(
46 name="mkdocs",
47 version=get_version("mkdocs"),
48 url='https://www.mkdocs.org',
49 license='BSD',
50 description='Project documentation with Markdown.',
51 long_description=long_description,
52 author='Tom Christie',
53 author_email='[email protected]', # SEE NOTE BELOW (*)
54 packages=get_packages("mkdocs"),
55 include_package_data=True,
56 install_requires=[
57 'click>=3.3',
58 'Jinja2>=2.7.1',
59 'livereload>=2.5.1',
60 'lunr[languages]>=0.5.2',
61 'Markdown>=2.3.1',
62 'PyYAML>=3.10',
63 'tornado>=5.0'
64 ],
65 python_requires='>=3.5',
66 entry_points={
67 'console_scripts': [
68 'mkdocs = mkdocs.__main__:cli',
69 ],
70 'mkdocs.themes': [
71 'mkdocs = mkdocs.themes.mkdocs',
72 'readthedocs = mkdocs.themes.readthedocs',
73 ],
74 'mkdocs.plugins': [
75 'search = mkdocs.contrib.search:SearchPlugin',
76 ],
77 },
78 classifiers=[
79 'Development Status :: 5 - Production/Stable',
80 'Environment :: Console',
81 'Environment :: Web Environment',
82 'Intended Audience :: Developers',
83 'License :: OSI Approved :: BSD License',
84 'Operating System :: OS Independent',
85 'Programming Language :: Python',
86 'Programming Language :: Python :: 3',
87 'Programming Language :: Python :: 3.5',
88 'Programming Language :: Python :: 3.6',
89 'Programming Language :: Python :: 3.7',
90 'Programming Language :: Python :: 3 :: Only',
91 "Programming Language :: Python :: Implementation :: CPython",
92 "Programming Language :: Python :: Implementation :: PyPy",
93 'Topic :: Documentation',
94 'Topic :: Text Processing',
95 ],
96 zip_safe=False,
97 )
98
99 # (*) Please direct queries to the discussion group:
100 # https://groups.google.com/forum/#!forum/mkdocs
101
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>

golden_diff:

diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -55,7 +55,7 @@
include_package_data=True,
install_requires=[
'click>=3.3',
- 'Jinja2>=2.7.1',
+ 'Jinja2>=2.10.1',
'livereload>=2.5.1',
'lunr[languages]>=0.5.2',
'Markdown>=2.3.1',
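As a quick sanity check (a sketch, not part of the patch), an environment can be tested against the new lower bound like this:

# Verify the installed Jinja2 satisfies the patched requirement.
import jinja2
from pkg_resources import parse_version

assert parse_version(jinja2.__version__) >= parse_version('2.10.1'), (
    'vulnerable Jinja2 %s installed' % jinja2.__version__
)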
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -55,7 +55,7 @@\n include_package_data=True,\n install_requires=[\n 'click>=3.3',\n- 'Jinja2>=2.7.1',\n+ 'Jinja2>=2.10.1',\n 'livereload>=2.5.1',\n 'lunr[languages]>=0.5.2',\n 'Markdown>=2.3.1',\n", "issue": "Jinja2 2.10 security vulnerability reported by GitHub\nHi\r\n\r\nI just got an alert on my github repo (where we use mkdocs to build our doc) for Jinja2 2.10:\r\n\r\n- https://github.com/eclipse/openj9-docs/network/alert/buildenv/requirements.txt/Jinja2/open\r\n\r\nRecommendation is to move to 2.10.1.\r\n\r\nAre there any known issues with moving to this level? Are there any plans to update the Mkdocs dependency list to this level? \r\n\r\nThanks!\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nimport re\nimport os\nimport sys\n\n\nlong_description = (\n \"MkDocs is a fast, simple and downright gorgeous static site generator \"\n \"that's geared towards building project documentation. Documentation \"\n \"source files are written in Markdown, and configured with a single YAML \"\n \"configuration file.\"\n)\n\n\ndef get_version(package):\n \"\"\"Return package version as listed in `__version__` in `init.py`.\"\"\"\n init_py = open(os.path.join(package, '__init__.py')).read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_packages(package):\n \"\"\"Return root package and all sub-packages.\"\"\"\n return [dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, '__init__.py'))]\n\n\nif sys.argv[-1] == 'publish':\n if os.system(\"pip freeze | grep wheel\"):\n print(\"wheel not installed.\\nUse `pip install wheel`.\\nExiting.\")\n sys.exit()\n if os.system(\"pip freeze | grep twine\"):\n print(\"twine not installed.\\nUse `pip install twine`.\\nExiting.\")\n sys.exit()\n os.system(\"python setup.py sdist bdist_wheel\")\n os.system(\"twine upload dist/*\")\n print(\"You probably want to also tag the version now:\")\n print(\" git tag -a {0} -m 'version {0}'\".format(get_version(\"mkdocs\")))\n print(\" git push --tags\")\n sys.exit()\n\n\nsetup(\n name=\"mkdocs\",\n version=get_version(\"mkdocs\"),\n url='https://www.mkdocs.org',\n license='BSD',\n description='Project documentation with Markdown.',\n long_description=long_description,\n author='Tom Christie',\n author_email='[email protected]', # SEE NOTE BELOW (*)\n packages=get_packages(\"mkdocs\"),\n include_package_data=True,\n install_requires=[\n 'click>=3.3',\n 'Jinja2>=2.7.1',\n 'livereload>=2.5.1',\n 'lunr[languages]>=0.5.2',\n 'Markdown>=2.3.1',\n 'PyYAML>=3.10',\n 'tornado>=5.0'\n ],\n python_requires='>=3.5',\n entry_points={\n 'console_scripts': [\n 'mkdocs = mkdocs.__main__:cli',\n ],\n 'mkdocs.themes': [\n 'mkdocs = mkdocs.themes.mkdocs',\n 'readthedocs = mkdocs.themes.readthedocs',\n ],\n 'mkdocs.plugins': [\n 'search = mkdocs.contrib.search:SearchPlugin',\n ],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n \"Programming Language :: Python :: 
Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n 'Topic :: Documentation',\n 'Topic :: Text Processing',\n ],\n zip_safe=False,\n)\n\n# (*) Please direct queries to the discussion group:\n# https://groups.google.com/forum/#!forum/mkdocs\n", "path": "setup.py"}]} | 1,645 | 114 |