problem_id (string, length 18-22) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, length 13-58) | prompt (string, length 1.71k-18.9k) | golden_diff (string, length 145-5.13k) | verification_info (string, length 465-23.6k) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
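The rows below can also be pulled programmatically with the Hugging Face `datasets` library. A minimal sketch follows, assuming the standard `load_dataset` API; the split name is a guess, since the preview does not show it:

```python
from datasets import load_dataset

# Dataset id taken from the "source" column of the rows below; the split name
# ("train") is an assumption -- check the hub page for the actual configs/splits.
ds = load_dataset("rasdani/github-patches", split="train")

print(ds.column_names)                       # problem_id, source, task_type, ...
print(ds[0]["problem_id"], ds[0]["in_source_id"])
```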
gh_patches_debug_3003 | rasdani/github-patches | git_diff | lightly-ai__lightly-656 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Incorrect inputsize for BarlowTwins Lightning Example Code
Should the input_size in [1] be `32` instead of `224`?
In [2], we use `input_size=32`.
[1] https://github.com/lightly-ai/lightly/blob/master/examples/pytorch_lightning/barlowtwins.py#L44
[2] https://github.com/lightly-ai/lightly/blob/master/examples/pytorch/barlowtwins.py#L35
</issue>
<code>
[start of examples/pytorch_lightning/barlowtwins.py]
1 import torch
2 from torch import nn
3 import torchvision
4 import pytorch_lightning as pl
5
6 from lightly.data import LightlyDataset
7 from lightly.data import ImageCollateFunction
8 from lightly.loss import BarlowTwinsLoss
9 from lightly.models.modules import BarlowTwinsProjectionHead
10
11
12 class BarlowTwins(pl.LightningModule):
13 def __init__(self):
14 super().__init__()
15 resnet = torchvision.models.resnet18()
16 self.backbone = nn.Sequential(*list(resnet.children())[:-1])
17 self.projection_head = BarlowTwinsProjectionHead(512, 2048, 2048)
18 self.criterion = BarlowTwinsLoss()
19
20 def forward(self, x):
21 x = self.backbone(x).flatten(start_dim=1)
22 z = self.projection_head(x)
23 return z
24
25 def training_step(self, batch, batch_index):
26 (x0, x1), _, _ = batch
27 z0 = self.forward(x0)
28 z1 = self.forward(x1)
29 loss = self.criterion(z0, z1)
30 return loss
31
32 def configure_optimizers(self):
33 optim = torch.optim.SGD(self.parameters(), lr=0.06)
34 return optim
35
36
37 model = BarlowTwins()
38
39 cifar10 = torchvision.datasets.CIFAR10("datasets/cifar10", download=True)
40 dataset = LightlyDataset.from_torch_dataset(cifar10)
41 # or create a dataset from a folder containing images or videos:
42 # dataset = LightlyDataset("path/to/folder")
43
44 collate_fn = ImageCollateFunction(input_size=224)
45
46 dataloader = torch.utils.data.DataLoader(
47 dataset,
48 batch_size=256,
49 collate_fn=collate_fn,
50 shuffle=True,
51 drop_last=True,
52 num_workers=8,
53 )
54
55 gpus = 1 if torch.cuda.is_available() else 0
56
57 trainer = pl.Trainer(max_epochs=10, gpus=gpus)
58 trainer.fit(model=model, train_dataloaders=dataloader)
59
[end of examples/pytorch_lightning/barlowtwins.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/pytorch_lightning/barlowtwins.py b/examples/pytorch_lightning/barlowtwins.py
--- a/examples/pytorch_lightning/barlowtwins.py
+++ b/examples/pytorch_lightning/barlowtwins.py
@@ -41,7 +41,7 @@
# or create a dataset from a folder containing images or videos:
# dataset = LightlyDataset("path/to/folder")
-collate_fn = ImageCollateFunction(input_size=224)
+collate_fn = ImageCollateFunction(input_size=32)
dataloader = torch.utils.data.DataLoader(
dataset,
| {"golden_diff": "diff --git a/examples/pytorch_lightning/barlowtwins.py b/examples/pytorch_lightning/barlowtwins.py\n--- a/examples/pytorch_lightning/barlowtwins.py\n+++ b/examples/pytorch_lightning/barlowtwins.py\n@@ -41,7 +41,7 @@\n # or create a dataset from a folder containing images or videos:\n # dataset = LightlyDataset(\"path/to/folder\")\n \n-collate_fn = ImageCollateFunction(input_size=224)\n+collate_fn = ImageCollateFunction(input_size=32)\n \n dataloader = torch.utils.data.DataLoader(\n dataset,\n", "issue": "Incorrect inputsize for BarlowTwins Lightning Example Code\nShould the input_size in [1] be `32` instead of `224`?\r\nIn [2], we use `input_size=32`.\r\n\r\n[1] https://github.com/lightly-ai/lightly/blob/master/examples/pytorch_lightning/barlowtwins.py#L44\r\n[2] https://github.com/lightly-ai/lightly/blob/master/examples/pytorch/barlowtwins.py#L35\r\n\r\n\n", "before_files": [{"content": "import torch\nfrom torch import nn\nimport torchvision\nimport pytorch_lightning as pl\n\nfrom lightly.data import LightlyDataset\nfrom lightly.data import ImageCollateFunction\nfrom lightly.loss import BarlowTwinsLoss\nfrom lightly.models.modules import BarlowTwinsProjectionHead\n\n\nclass BarlowTwins(pl.LightningModule):\n def __init__(self):\n super().__init__()\n resnet = torchvision.models.resnet18()\n self.backbone = nn.Sequential(*list(resnet.children())[:-1])\n self.projection_head = BarlowTwinsProjectionHead(512, 2048, 2048)\n self.criterion = BarlowTwinsLoss()\n\n def forward(self, x):\n x = self.backbone(x).flatten(start_dim=1)\n z = self.projection_head(x)\n return z\n\n def training_step(self, batch, batch_index):\n (x0, x1), _, _ = batch\n z0 = self.forward(x0)\n z1 = self.forward(x1)\n loss = self.criterion(z0, z1)\n return loss\n\n def configure_optimizers(self):\n optim = torch.optim.SGD(self.parameters(), lr=0.06)\n return optim\n\n\nmodel = BarlowTwins()\n\ncifar10 = torchvision.datasets.CIFAR10(\"datasets/cifar10\", download=True)\ndataset = LightlyDataset.from_torch_dataset(cifar10)\n# or create a dataset from a folder containing images or videos:\n# dataset = LightlyDataset(\"path/to/folder\")\n\ncollate_fn = ImageCollateFunction(input_size=224)\n\ndataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=256,\n collate_fn=collate_fn,\n shuffle=True,\n drop_last=True,\n num_workers=8,\n)\n\ngpus = 1 if torch.cuda.is_available() else 0\n\ntrainer = pl.Trainer(max_epochs=10, gpus=gpus)\ntrainer.fit(model=model, train_dataloaders=dataloader)\n", "path": "examples/pytorch_lightning/barlowtwins.py"}]} | 1,215 | 135 |
gh_patches_debug_23350 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5768 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Documentation formatter is turning "--" into "–" (en dash) in the description field
In the [CLI Command Reference](https://www.checkov.io/2.Basics/CLI%20Command%20Reference.html) documentation, all the examples that use `--` options in the "Description" column are coming through as `–` (en dash). This makes it impossible to actually copy these examples for use. They are correct in [the Markdown source file](https://github.com/bridgecrewio/checkov/blob/main/docs/2.Basics/CLI%20Command%20Reference.md). To verify, use the browser search for `--output-file-path` and you'll find only one search result instead of two.
</issue>
<code>
[start of checkov/common/parallelizer/parallel_runner.py]
1 from __future__ import annotations
2
3 import concurrent.futures
4 import logging
5 import multiprocessing
6 import os
7 import platform
8 from collections.abc import Iterator, Iterable
9 from multiprocessing.pool import Pool
10 from typing import Any, List, Generator, Callable, Optional, TypeVar, TYPE_CHECKING
11
12 from checkov.common.models.enums import ParallelizationType
13
14 if TYPE_CHECKING:
15 from multiprocessing.connection import Connection
16
17 _T = TypeVar("_T")
18
19
20 class ParallelRunner:
21 def __init__(
22 self, workers_number: int | None = None, parallelization_type: ParallelizationType = ParallelizationType.FORK
23 ) -> None:
24 self.workers_number = (workers_number if workers_number else os.cpu_count()) or 1
25 self.os = platform.system()
26 self.type: str | ParallelizationType = parallelization_type
27
28 custom_type = os.getenv("CHECKOV_PARALLELIZATION_TYPE")
29 if custom_type:
30 self.type = custom_type.lower()
31
32 if not custom_type and os.getenv("PYCHARM_HOSTED") == "1":
33 # PYCHARM_HOSTED env variable equals 1 when debugging via jetbrains IDE.
34 # To prevent JetBrains IDE from crashing on debug run sequentially
35 self.type = ParallelizationType.NONE
36 elif self.os == "Windows":
37 # 'fork' mode is not supported on 'Windows'
38 # 'spawn' mode results in a strange error, which needs to be investigated on an actual Windows machine
39 self.type = ParallelizationType.THREAD
40
41 def run_function(
42 self,
43 func: Callable[..., _T],
44 items: List[Any],
45 group_size: Optional[int] = None,
46 ) -> Iterable[_T]:
47 if self.type == ParallelizationType.THREAD:
48 return self._run_function_multithreaded(func, items)
49 elif self.type == ParallelizationType.FORK:
50 return self._run_function_multiprocess_fork(func, items, group_size)
51 elif self.type == ParallelizationType.SPAWN:
52 return self._run_function_multiprocess_spawn(func, items, group_size)
53 else:
54 return self._run_function_sequential(func, items)
55
56 def _run_function_multiprocess_fork(
57 self, func: Callable[[Any], _T], items: List[Any], group_size: Optional[int]
58 ) -> Generator[_T, None, Iterable[_T]]:
59 if multiprocessing.current_process().daemon:
60 # can't fork, when already inside a pool
61 return self._run_function_multithreaded(func, items) # noqa: B901
62
63 if not group_size:
64 group_size = int(len(items) / self.workers_number) + 1
65 groups_of_items = [items[i : i + group_size] for i in range(0, len(items), group_size)]
66
67 def func_wrapper(original_func: Callable[[Any], _T], items_group: List[Any], connection: Connection) -> None:
68 for item in items_group:
69 try:
70 if isinstance(item, tuple):
71 # unpack a tuple to pass multiple arguments to the target function
72 result = original_func(*item)
73 else:
74 result = original_func(item)
75 except Exception:
76 logging.error(
77 f"Failed to invoke function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with {item}",
78 exc_info=True,
79 )
80 result = None
81
82 connection.send(result)
83 connection.close()
84
85 logging.debug(
86 f"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'fork'"
87 )
88 processes = []
89 for group_of_items in groups_of_items:
90 parent_conn, child_conn = multiprocessing.Pipe(duplex=False)
91 process = multiprocessing.get_context("fork").Process(
92 target=func_wrapper, args=(func, group_of_items, child_conn)
93 )
94 processes.append((process, parent_conn, len(group_of_items)))
95 process.start()
96
97 for _, parent_conn, group_len in processes:
98 for _ in range(group_len):
99 try:
100 yield parent_conn.recv()
101 except EOFError:
102 pass
103
104 return []
105
106 def _run_function_multiprocess_spawn(
107 self, func: Callable[[Any], _T], items: list[Any], group_size: int | None
108 ) -> Iterable[_T]:
109 if multiprocessing.current_process().daemon:
110 # can't create a new pool, when already inside a pool
111 return self._run_function_multithreaded(func, items)
112
113 if not group_size:
114 group_size = int(len(items) / self.workers_number) + 1
115
116 logging.debug(
117 f"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'spawn'"
118 )
119 with Pool(processes=self.workers_number, context=multiprocessing.get_context("spawn")) as p:
120 if items and isinstance(items[0], tuple):
121 # need to use 'starmap' to pass multiple arguments to the target function
122 return p.starmap(func, items, chunksize=group_size)
123
124 return p.map(func, items, chunksize=group_size)
125
126 def _run_function_multithreaded(self, func: Callable[[Any], _T], items: List[Any]) -> Iterator[_T]:
127 logging.debug(
128 f"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'thread'"
129 )
130 with concurrent.futures.ThreadPoolExecutor(max_workers=self.workers_number) as executor:
131 if items and isinstance(items[0], tuple):
132 # split a list of tuple into tuples of the positioned values of the tuple
133 return executor.map(func, *list(zip(*items))) # noqa[B905] # no need to set 'strict' otherwise 'mypy' complains
134
135 return executor.map(func, items)
136
137 def _run_function_sequential(self, func: Callable[[Any], _T], items: List[Any]) -> Iterator[_T]:
138 logging.debug(
139 f"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'none'"
140 )
141 if items and isinstance(items[0], tuple):
142 # unpack a tuple to pass multiple arguments to the target function
143 return (func(*item) for item in items)
144
145 return (func(item) for item in items)
146
147
148 parallel_runner = ParallelRunner()
149
[end of checkov/common/parallelizer/parallel_runner.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/common/parallelizer/parallel_runner.py b/checkov/common/parallelizer/parallel_runner.py
--- a/checkov/common/parallelizer/parallel_runner.py
+++ b/checkov/common/parallelizer/parallel_runner.py
@@ -55,10 +55,12 @@
def _run_function_multiprocess_fork(
self, func: Callable[[Any], _T], items: List[Any], group_size: Optional[int]
- ) -> Generator[_T, None, Iterable[_T]]:
+ ) -> Generator[_T, None, None]:
if multiprocessing.current_process().daemon:
# can't fork, when already inside a pool
- return self._run_function_multithreaded(func, items) # noqa: B901
+ for result in self._run_function_multithreaded(func, items):
+ yield result
+ return
if not group_size:
group_size = int(len(items) / self.workers_number) + 1
@@ -101,8 +103,6 @@
except EOFError:
pass
- return []
-
def _run_function_multiprocess_spawn(
self, func: Callable[[Any], _T], items: list[Any], group_size: int | None
) -> Iterable[_T]:
| {"golden_diff": "diff --git a/checkov/common/parallelizer/parallel_runner.py b/checkov/common/parallelizer/parallel_runner.py\n--- a/checkov/common/parallelizer/parallel_runner.py\n+++ b/checkov/common/parallelizer/parallel_runner.py\n@@ -55,10 +55,12 @@\n \n def _run_function_multiprocess_fork(\n self, func: Callable[[Any], _T], items: List[Any], group_size: Optional[int]\n- ) -> Generator[_T, None, Iterable[_T]]:\n+ ) -> Generator[_T, None, None]:\n if multiprocessing.current_process().daemon:\n # can't fork, when already inside a pool\n- return self._run_function_multithreaded(func, items) # noqa: B901\n+ for result in self._run_function_multithreaded(func, items):\n+ yield result\n+ return\n \n if not group_size:\n group_size = int(len(items) / self.workers_number) + 1\n@@ -101,8 +103,6 @@\n except EOFError:\n pass\n \n- return []\n-\n def _run_function_multiprocess_spawn(\n self, func: Callable[[Any], _T], items: list[Any], group_size: int | None\n ) -> Iterable[_T]:\n", "issue": "Documentation formatter is turning \"--\" into \"\u2013\" (en dash) in the description field\nIn the [CLI Command Reference](https://www.checkov.io/2.Basics/CLI%20Command%20Reference.html) documentation, all the examples that use `--` options in the \"Description\" column are coming through as `\u2013` (en dash). This makes it impossible to actually copy these examples for use. They are correct in [the Markdown source file](https://github.com/bridgecrewio/checkov/blob/main/docs/2.Basics/CLI%20Command%20Reference.md). To verify, use the browser search for `--output-file-path` and you'll find only one search result instead of two.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport concurrent.futures\nimport logging\nimport multiprocessing\nimport os\nimport platform\nfrom collections.abc import Iterator, Iterable\nfrom multiprocessing.pool import Pool\nfrom typing import Any, List, Generator, Callable, Optional, TypeVar, TYPE_CHECKING\n\nfrom checkov.common.models.enums import ParallelizationType\n\nif TYPE_CHECKING:\n from multiprocessing.connection import Connection\n\n_T = TypeVar(\"_T\")\n\n\nclass ParallelRunner:\n def __init__(\n self, workers_number: int | None = None, parallelization_type: ParallelizationType = ParallelizationType.FORK\n ) -> None:\n self.workers_number = (workers_number if workers_number else os.cpu_count()) or 1\n self.os = platform.system()\n self.type: str | ParallelizationType = parallelization_type\n\n custom_type = os.getenv(\"CHECKOV_PARALLELIZATION_TYPE\")\n if custom_type:\n self.type = custom_type.lower()\n\n if not custom_type and os.getenv(\"PYCHARM_HOSTED\") == \"1\":\n # PYCHARM_HOSTED env variable equals 1 when debugging via jetbrains IDE.\n # To prevent JetBrains IDE from crashing on debug run sequentially\n self.type = ParallelizationType.NONE\n elif self.os == \"Windows\":\n # 'fork' mode is not supported on 'Windows'\n # 'spawn' mode results in a strange error, which needs to be investigated on an actual Windows machine\n self.type = ParallelizationType.THREAD\n\n def run_function(\n self,\n func: Callable[..., _T],\n items: List[Any],\n group_size: Optional[int] = None,\n ) -> Iterable[_T]:\n if self.type == ParallelizationType.THREAD:\n return self._run_function_multithreaded(func, items)\n elif self.type == ParallelizationType.FORK:\n return self._run_function_multiprocess_fork(func, items, group_size)\n elif self.type == ParallelizationType.SPAWN:\n return self._run_function_multiprocess_spawn(func, items, group_size)\n else:\n 
return self._run_function_sequential(func, items)\n\n def _run_function_multiprocess_fork(\n self, func: Callable[[Any], _T], items: List[Any], group_size: Optional[int]\n ) -> Generator[_T, None, Iterable[_T]]:\n if multiprocessing.current_process().daemon:\n # can't fork, when already inside a pool\n return self._run_function_multithreaded(func, items) # noqa: B901\n\n if not group_size:\n group_size = int(len(items) / self.workers_number) + 1\n groups_of_items = [items[i : i + group_size] for i in range(0, len(items), group_size)]\n\n def func_wrapper(original_func: Callable[[Any], _T], items_group: List[Any], connection: Connection) -> None:\n for item in items_group:\n try:\n if isinstance(item, tuple):\n # unpack a tuple to pass multiple arguments to the target function\n result = original_func(*item)\n else:\n result = original_func(item)\n except Exception:\n logging.error(\n f\"Failed to invoke function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with {item}\",\n exc_info=True,\n )\n result = None\n\n connection.send(result)\n connection.close()\n\n logging.debug(\n f\"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'fork'\"\n )\n processes = []\n for group_of_items in groups_of_items:\n parent_conn, child_conn = multiprocessing.Pipe(duplex=False)\n process = multiprocessing.get_context(\"fork\").Process(\n target=func_wrapper, args=(func, group_of_items, child_conn)\n )\n processes.append((process, parent_conn, len(group_of_items)))\n process.start()\n\n for _, parent_conn, group_len in processes:\n for _ in range(group_len):\n try:\n yield parent_conn.recv()\n except EOFError:\n pass\n\n return []\n\n def _run_function_multiprocess_spawn(\n self, func: Callable[[Any], _T], items: list[Any], group_size: int | None\n ) -> Iterable[_T]:\n if multiprocessing.current_process().daemon:\n # can't create a new pool, when already inside a pool\n return self._run_function_multithreaded(func, items)\n\n if not group_size:\n group_size = int(len(items) / self.workers_number) + 1\n\n logging.debug(\n f\"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'spawn'\"\n )\n with Pool(processes=self.workers_number, context=multiprocessing.get_context(\"spawn\")) as p:\n if items and isinstance(items[0], tuple):\n # need to use 'starmap' to pass multiple arguments to the target function\n return p.starmap(func, items, chunksize=group_size)\n\n return p.map(func, items, chunksize=group_size)\n\n def _run_function_multithreaded(self, func: Callable[[Any], _T], items: List[Any]) -> Iterator[_T]:\n logging.debug(\n f\"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'thread'\"\n )\n with concurrent.futures.ThreadPoolExecutor(max_workers=self.workers_number) as executor:\n if items and isinstance(items[0], tuple):\n # split a list of tuple into tuples of the positioned values of the tuple\n return executor.map(func, *list(zip(*items))) # noqa[B905] # no need to set 'strict' otherwise 'mypy' complains\n\n return executor.map(func, items)\n\n def _run_function_sequential(self, func: Callable[[Any], _T], items: List[Any]) -> Iterator[_T]:\n logging.debug(\n f\"Running function {func.__code__.co_filename.replace('.py', '')}.{func.__name__} with parallelization type 'none'\"\n )\n if items and isinstance(items[0], tuple):\n # unpack a tuple to pass multiple arguments to the target function\n return (func(*item) for item in 
items)\n\n return (func(item) for item in items)\n\n\nparallel_runner = ParallelRunner()\n", "path": "checkov/common/parallelizer/parallel_runner.py"}]} | 2,430 | 287 |
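The golden diff above rewrites `_run_function_multiprocess_fork` so that the multithreaded fallback is yielded item by item instead of being `return`ed from inside the generator, and drops the trailing `return []`. A tiny standalone illustration (not checkov code) of why `return <iterable>` inside a generator never reaches the caller:

```python
def broken(items):
    # `return <iterable>` inside a generator does NOT hand the items to the
    # caller; it only sets StopIteration.value, so iteration produces nothing.
    return (i * 2 for i in items)
    yield  # unreachable, but it makes this function a generator, like the original

def fixed(items):
    # Re-yielding each element is what the patch does for the fallback path.
    for i in items:
        yield i * 2

print(list(broken([1, 2, 3])))  # []
print(list(fixed([1, 2, 3])))   # [2, 4, 6]
```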
gh_patches_debug_924 | rasdani/github-patches | git_diff | joke2k__faker-993 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
text-unidecode is released under the Artistic license
`text-unidecode` is released under the Artistic license v1.0, which is considered non-free by the FSF (and therefore not compatible with the GPL). I believe this clause is also of concern to commercial users of faker too:
> 5. You may charge a reasonable copying fee for any distribution of this Package. You may charge any fee you choose for support of this Package. You may not charge a fee for this Package itself. However, you may distribute this Package in aggregate with other (possibly commercial) programs as part of a larger (possibly commercial) software distribution provided that you do not advertise this Package as a product of your own.
Not being able to charge a fee for the software is problematic for those of us who are contractors, for example.
I realise there aren't really any good alternatives (`unidecode` is GPL licensed as pointed out in #628 , `isounidecode` doesn't support Python 3), so would a patch making `text-unidecode` an optional dependency be acceptable?
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # coding=utf-8
3
4 import io
5 import os
6
7 from setuptools import find_packages, setup
8
9 here = os.path.abspath(os.path.dirname(__file__))
10 with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as fp:
11 README = fp.read()
12
13 with io.open(os.path.join(here, 'VERSION')) as version_file:
14 VERSION = version_file.read().strip()
15
16
17 # this module can be zip-safe if the zipimporter implements iter_modules or if
18 # pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
19 try:
20 import pkgutil
21 import zipimport
22 zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \
23 zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
24 except (ImportError, AttributeError):
25 zip_safe = False
26
27 setup(
28 name='Faker',
29 version=VERSION,
30 description="Faker is a Python package that generates fake data for you.",
31 long_description=README,
32 entry_points={
33 'console_scripts': ['faker=faker.cli:execute_from_command_line'],
34 },
35 classifiers=[
36 # See https://pypi.org/pypi?%3Aaction=list_classifiers
37 'Development Status :: 5 - Production/Stable',
38 'Environment :: Console',
39 'Intended Audience :: Developers',
40 'Programming Language :: Python',
41 'Programming Language :: Python :: 2',
42 'Programming Language :: Python :: 2.7',
43 'Programming Language :: Python :: 3',
44 'Programming Language :: Python :: 3.4',
45 'Programming Language :: Python :: 3.5',
46 'Programming Language :: Python :: 3.6',
47 'Programming Language :: Python :: 3.7',
48 'Programming Language :: Python :: Implementation :: CPython',
49 'Programming Language :: Python :: Implementation :: PyPy',
50 'Topic :: Software Development :: Libraries :: Python Modules',
51 'Topic :: Software Development :: Testing',
52 'Topic :: Utilities',
53 'License :: OSI Approved :: MIT License',
54 ],
55 keywords='faker fixtures data test mock generator',
56 author='joke2k',
57 author_email='[email protected]',
58 url='https://github.com/joke2k/faker',
59 license='MIT License',
60 packages=find_packages(exclude=["docs", "tests", "tests.*"]),
61 platforms=["any"],
62 test_suite='tests',
63 zip_safe=zip_safe,
64 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
65 setup_requires=["pytest-runner"],
66 install_requires=[
67 "python-dateutil>=2.4",
68 "six>=1.10",
69 "text-unidecode==1.2",
70 ],
71 tests_require=[
72 "validators>=0.13.0",
73 "ukpostcodeparser>=1.1.1",
74 "mock ; python_version < '3.3'",
75 "pytest>=3.8.0,<3.9",
76 "more-itertools<6.0.0 ; python_version < '3.0'",
77 # restricted because they may drop python2 support in future versions
78 # https://github.com/joke2k/faker/issues/970
79 "random2<1.1",
80 "freezegun<0.4",
81 ],
82 extras_require={
83 ':python_version<"3.3"': [
84 'ipaddress',
85 ],
86 },
87 )
88
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -66,7 +66,7 @@
install_requires=[
"python-dateutil>=2.4",
"six>=1.10",
- "text-unidecode==1.2",
+ "text-unidecode==1.3",
],
tests_require=[
"validators>=0.13.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,7 +66,7 @@\n install_requires=[\n \"python-dateutil>=2.4\",\n \"six>=1.10\",\n- \"text-unidecode==1.2\",\n+ \"text-unidecode==1.3\",\n ],\n tests_require=[\n \"validators>=0.13.0\",\n", "issue": "text-unidecode is released under the Artistic license\n`text-unidecode` is released under the Artistic license v1.0, which is considered non-free by the FSF (and therefore not compatible with the GPL). I believe this clause is also of concern to commercial users of faker too:\r\n\r\n> 5. You may charge a reasonable copying fee for any distribution of this Package. You may charge any fee you choose for support of this Package. You may not charge a fee for this Package itself. However, you may distribute this Package in aggregate with other (possibly commercial) programs as part of a larger (possibly commercial) software distribution provided that you do not advertise this Package as a product of your own.\r\n\r\nNot being able to charge a fee for the software is problematic for those of us who are contractors, for example.\r\n\r\nI realise there aren't really any good alternatives (`unidecode` is GPL licensed as pointed out in #628 , `isounidecode` doesn't support Python 3), so would a patch making `text-unidecode` an optional dependency be acceptable?\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport io\nimport os\n\nfrom setuptools import find_packages, setup\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as fp:\n README = fp.read()\n\nwith io.open(os.path.join(here, 'VERSION')) as version_file:\n VERSION = version_file.read().strip()\n\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='Faker',\n version=VERSION,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README,\n entry_points={\n 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n },\n classifiers=[\n # See https://pypi.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License',\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='https://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=[\"docs\", \"tests\", \"tests.*\"]),\n platforms=[\"any\"],\n test_suite='tests',\n zip_safe=zip_safe,\n 
python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*\",\n setup_requires=[\"pytest-runner\"],\n install_requires=[\n \"python-dateutil>=2.4\",\n \"six>=1.10\",\n \"text-unidecode==1.2\",\n ],\n tests_require=[\n \"validators>=0.13.0\",\n \"ukpostcodeparser>=1.1.1\",\n \"mock ; python_version < '3.3'\",\n \"pytest>=3.8.0,<3.9\",\n \"more-itertools<6.0.0 ; python_version < '3.0'\",\n # restricted because they may drop python2 support in future versions\n # https://github.com/joke2k/faker/issues/970\n \"random2<1.1\",\n \"freezegun<0.4\",\n ],\n extras_require={\n ':python_version<\"3.3\"': [\n 'ipaddress',\n ],\n },\n)\n", "path": "setup.py"}]} | 1,677 | 96 |
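The merged change for this row only bumps the pin to `text-unidecode==1.3`. The issue also floats making the package an optional dependency; a hypothetical packaging sketch of that alternative is below (illustrative only: the extra name is invented and this is not what was merged):

```python
from setuptools import setup, find_packages

# Hypothetical setup() showing the optional-dependency route discussed in the
# issue: the base install carries no text-unidecode requirement, and users who
# need transliteration opt in with `pip install Faker[transliterate]`.
setup(
    name="Faker",
    version="0.0.dev0",  # placeholder version for illustration
    packages=find_packages(exclude=["docs", "tests", "tests.*"]),
    install_requires=[
        "python-dateutil>=2.4",
        "six>=1.10",
    ],
    extras_require={
        "transliterate": ["text-unidecode==1.3"],
    },
)
```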
gh_patches_debug_22236 | rasdani/github-patches | git_diff | onnx__onnx-4386 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make C++ and Python checker API consistent
Python checker API supports `full_check` arg:
https://github.com/onnx/onnx/blob/fa6f8cfdce3d86346e8a7494f3062b98416c85fb/onnx/checker.py#L94
C++ does not.
It'd be nice for them to be consistent.
</issue>
<code>
[start of onnx/checker.py]
1 # SPDX-License-Identifier: Apache-2.0
2 """onnx checker
3
4 This implements graphalities that allows us to check whether a serialized
5 proto is legal.
6 """
7
8 import functools
9
10 from onnx import (ValueInfoProto,
11 AttributeProto,
12 TensorProto,
13 SparseTensorProto,
14 NodeProto,
15 ModelProto,
16 GraphProto,
17 IR_VERSION)
18 import onnx.onnx_cpp2py_export.checker as C
19 import onnx.defs
20 from google.protobuf.message import Message
21 from typing import TypeVar, Callable, Any, Type, cast, Union
22 import onnx.shape_inference
23 import sys
24
25
26 # Limitation of single protobuf file is 2GB
27 MAXIMUM_PROTOBUF = 2000000000
28
29 # TODO: This thing where we reserialize the protobuf back into the
30 # string, only to deserialize it at the call site, is really goofy.
31 # Stop doing that.
32
33
34 # NB: Please don't edit this context!
35 DEFAULT_CONTEXT = C.CheckerContext()
36 DEFAULT_CONTEXT.ir_version = IR_VERSION
37 # TODO: Maybe ONNX-ML should also be defaulted?
38 DEFAULT_CONTEXT.opset_imports = {'': onnx.defs.onnx_opset_version()}
39
40
41 FuncType = TypeVar('FuncType', bound=Callable[..., Any])
42
43
44 # TODO: This really doesn't seem worth the metaprogramming...
45 def _create_checker(proto_type: Type[Message]) -> Callable[[FuncType], FuncType]:
46 def decorator(py_func: FuncType) -> FuncType:
47 @functools.wraps(py_func)
48 def checker(proto: Message, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> Any:
49 if not isinstance(proto, proto_type):
50 raise RuntimeError(
51 'You cannot pass an object that is not of type {}'.format(
52 proto_type.__name__))
53 return getattr(C, py_func.__name__)(
54 proto.SerializeToString(), ctx)
55 return cast(FuncType, checker)
56 return decorator
57
58
59 @_create_checker(ValueInfoProto)
60 def check_value_info(value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
61 pass
62
63
64 @_create_checker(TensorProto)
65 def check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
66 pass
67
68
69 @_create_checker(AttributeProto)
70 def check_attribute(attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
71 pass
72
73
74 @_create_checker(NodeProto)
75 def check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
76 pass
77
78
79 @_create_checker(GraphProto)
80 def check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
81 pass
82
83
84 def check_sparse_tensor(sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:
85 C.check_sparse_tensor(sparse.SerializeToString(), ctx)
86
87
88 def check_model(model: Union[ModelProto, str, bytes], full_check: bool = False) -> None:
89 """Check the consistency of a model. An exception is raised if the test fails.
90
91 Arguments:
92 model (ModelProto): model to check
93 full_check (bool): if True, the function checks shapes can be inferred
94 """
95 # If model is a path instead of ModelProto
96 if isinstance(model, str):
97 C.check_model_path(model)
98 if full_check:
99 onnx.shape_inference.infer_shapes_path(model, check_type=True, strict_mode=True)
100 else:
101 protobuf_string = model if isinstance(model, bytes) else model.SerializeToString()
102 # If the protobuf is larger than 2GB,
103 # remind users should use the model path to check
104 if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:
105 raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')
106 C.check_model(protobuf_string)
107 if full_check:
108 onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)
109
110
111 ValidationError = C.ValidationError
112
[end of onnx/checker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/onnx/checker.py b/onnx/checker.py
--- a/onnx/checker.py
+++ b/onnx/checker.py
@@ -94,18 +94,14 @@
"""
# If model is a path instead of ModelProto
if isinstance(model, str):
- C.check_model_path(model)
- if full_check:
- onnx.shape_inference.infer_shapes_path(model, check_type=True, strict_mode=True)
+ C.check_model_path(model, full_check)
else:
protobuf_string = model if isinstance(model, bytes) else model.SerializeToString()
# If the protobuf is larger than 2GB,
# remind users should use the model path to check
if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:
raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')
- C.check_model(protobuf_string)
- if full_check:
- onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)
+ C.check_model(protobuf_string, full_check)
ValidationError = C.ValidationError
| {"golden_diff": "diff --git a/onnx/checker.py b/onnx/checker.py\n--- a/onnx/checker.py\n+++ b/onnx/checker.py\n@@ -94,18 +94,14 @@\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, str):\n- C.check_model_path(model)\n- if full_check:\n- onnx.shape_inference.infer_shapes_path(model, check_type=True, strict_mode=True)\n+ C.check_model_path(model, full_check)\n else:\n protobuf_string = model if isinstance(model, bytes) else model.SerializeToString()\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')\n- C.check_model(protobuf_string)\n- if full_check:\n- onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)\n+ C.check_model(protobuf_string, full_check)\n \n \n ValidationError = C.ValidationError\n", "issue": "Make C++ and Python checker API consistent\nPython checker API supports `full_check` arg:\r\nhttps://github.com/onnx/onnx/blob/fa6f8cfdce3d86346e8a7494f3062b98416c85fb/onnx/checker.py#L94\r\n\r\nC++ does not.\r\nIt'd be nice for them to be consistent.\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n\"\"\"onnx checker\n\nThis implements graphalities that allows us to check whether a serialized\nproto is legal.\n\"\"\"\n\nimport functools\n\nfrom onnx import (ValueInfoProto,\n AttributeProto,\n TensorProto,\n SparseTensorProto,\n NodeProto,\n ModelProto,\n GraphProto,\n IR_VERSION)\nimport onnx.onnx_cpp2py_export.checker as C\nimport onnx.defs\nfrom google.protobuf.message import Message\nfrom typing import TypeVar, Callable, Any, Type, cast, Union\nimport onnx.shape_inference\nimport sys\n\n\n# Limitation of single protobuf file is 2GB\nMAXIMUM_PROTOBUF = 2000000000\n\n# TODO: This thing where we reserialize the protobuf back into the\n# string, only to deserialize it at the call site, is really goofy.\n# Stop doing that.\n\n\n# NB: Please don't edit this context!\nDEFAULT_CONTEXT = C.CheckerContext()\nDEFAULT_CONTEXT.ir_version = IR_VERSION\n# TODO: Maybe ONNX-ML should also be defaulted?\nDEFAULT_CONTEXT.opset_imports = {'': onnx.defs.onnx_opset_version()}\n\n\nFuncType = TypeVar('FuncType', bound=Callable[..., Any])\n\n\n# TODO: This really doesn't seem worth the metaprogramming...\ndef _create_checker(proto_type: Type[Message]) -> Callable[[FuncType], FuncType]:\n def decorator(py_func: FuncType) -> FuncType:\n @functools.wraps(py_func)\n def checker(proto: Message, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> Any:\n if not isinstance(proto, proto_type):\n raise RuntimeError(\n 'You cannot pass an object that is not of type {}'.format(\n proto_type.__name__))\n return getattr(C, py_func.__name__)(\n proto.SerializeToString(), ctx)\n return cast(FuncType, checker)\n return decorator\n\n\n@_create_checker(ValueInfoProto)\ndef check_value_info(value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(TensorProto)\ndef check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(AttributeProto)\ndef check_attribute(attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(NodeProto)\ndef check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n pass\n\n\n@_create_checker(GraphProto)\ndef check_graph(graph: GraphProto, ctx: C.CheckerContext = 
DEFAULT_CONTEXT) -> None:\n pass\n\n\ndef check_sparse_tensor(sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n C.check_sparse_tensor(sparse.SerializeToString(), ctx)\n\n\ndef check_model(model: Union[ModelProto, str, bytes], full_check: bool = False) -> None:\n \"\"\"Check the consistency of a model. An exception is raised if the test fails.\n\n Arguments:\n model (ModelProto): model to check\n full_check (bool): if True, the function checks shapes can be inferred\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, str):\n C.check_model_path(model)\n if full_check:\n onnx.shape_inference.infer_shapes_path(model, check_type=True, strict_mode=True)\n else:\n protobuf_string = model if isinstance(model, bytes) else model.SerializeToString()\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError('This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.')\n C.check_model(protobuf_string)\n if full_check:\n onnx.shape_inference.infer_shapes(model, check_type=True, strict_mode=True)\n\n\nValidationError = C.ValidationError\n", "path": "onnx/checker.py"}]} | 1,726 | 249 |
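With the golden diff applied, `full_check` is forwarded to the C extension instead of re-running shape inference from the Python wrapper. Caller-side usage stays the same; a short sketch (the model path is a placeholder):

```python
import onnx
from onnx import checker

model = onnx.load("model.onnx")  # placeholder path

# Plain structural/consistency check.
checker.check_model(model)

# Full check: also verifies that shapes/types can be inferred. After the patch
# this flag is handled inside the C++ checker for both proto and path inputs.
checker.check_model(model, full_check=True)

# Models larger than 2GB must be checked by path rather than by proto.
checker.check_model("model.onnx", full_check=True)
```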
gh_patches_debug_37440 | rasdani/github-patches | git_diff | arviz-devs__arviz-636 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow normalization in plot_parallel
It would be great if plot_parallel had a keyword arg `normalize` (or standardize), that centers and rescales the variables before plotting. That would make it easier to see things if some posteriors are much more tight than others:

</issue>
<code>
[start of arviz/plots/parallelplot.py]
1 """Parallel coordinates plot showing posterior points with and without divergences marked."""
2 import matplotlib.pyplot as plt
3 import numpy as np
4
5 from ..data import convert_to_dataset
6 from .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords
7 from ..utils import _var_names
8
9
10 def plot_parallel(
11 data,
12 var_names=None,
13 coords=None,
14 figsize=None,
15 textsize=None,
16 legend=True,
17 colornd="k",
18 colord="C1",
19 shadend=0.025,
20 ax=None,
21 ):
22 """
23 Plot parallel coordinates plot showing posterior points with and without divergences.
24
25 Described by https://arxiv.org/abs/1709.01449, suggested by Ari Hartikainen
26
27 Parameters
28 ----------
29 data : obj
30 Any object that can be converted to an az.InferenceData object
31 Refer to documentation of az.convert_to_dataset for details
32 var_names : list of variable names
33 Variables to be plotted, if None all variable are plotted. Can be used to change the order
34 of the plotted variables
35 coords : mapping, optional
36 Coordinates of var_names to be plotted. Passed to `Dataset.sel`
37 figsize : tuple
38 Figure size. If None it will be defined automatically.
39 textsize: float
40 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
41 on figsize.
42 legend : bool
43 Flag for plotting legend (defaults to True)
44 colornd : valid matplotlib color
45 color for non-divergent points. Defaults to 'k'
46 colord : valid matplotlib color
47 color for divergent points. Defaults to 'C1'
48 shadend : float
49 Alpha blending value for non-divergent points, between 0 (invisible) and 1 (opaque).
50 Defaults to .025
51 ax : axes
52 Matplotlib axes.
53
54 Returns
55 -------
56 ax : matplotlib axes
57 """
58 if coords is None:
59 coords = {}
60
61 # Get diverging draws and combine chains
62 divergent_data = convert_to_dataset(data, group="sample_stats")
63 _, diverging_mask = xarray_to_ndarray(divergent_data, var_names=("diverging",), combined=True)
64 diverging_mask = np.squeeze(diverging_mask)
65
66 # Get posterior draws and combine chains
67 posterior_data = convert_to_dataset(data, group="posterior")
68 var_names = _var_names(var_names, posterior_data)
69 var_names, _posterior = xarray_to_ndarray(
70 get_coords(posterior_data, coords), var_names=var_names, combined=True
71 )
72
73 if len(var_names) < 2:
74 raise ValueError("This plot needs at least two variables")
75
76 figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)
77
78 if ax is None:
79 _, ax = plt.subplots(figsize=figsize, constrained_layout=True)
80
81 ax.plot(_posterior[:, ~diverging_mask], color=colornd, alpha=shadend)
82
83 if np.any(diverging_mask):
84 ax.plot(_posterior[:, diverging_mask], color=colord, lw=1)
85
86 ax.tick_params(labelsize=textsize)
87 ax.set_xticks(range(len(var_names)))
88 ax.set_xticklabels(var_names)
89
90 if legend:
91 ax.plot([], color=colornd, label="non-divergent")
92 if np.any(diverging_mask):
93 ax.plot([], color=colord, label="divergent")
94 ax.legend(fontsize=xt_labelsize)
95
96 return ax
97
[end of arviz/plots/parallelplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/arviz/plots/parallelplot.py b/arviz/plots/parallelplot.py
--- a/arviz/plots/parallelplot.py
+++ b/arviz/plots/parallelplot.py
@@ -2,6 +2,7 @@
import matplotlib.pyplot as plt
import numpy as np
+from scipy.stats.mstats import rankdata
from ..data import convert_to_dataset
from .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords
from ..utils import _var_names
@@ -18,6 +19,7 @@
colord="C1",
shadend=0.025,
ax=None,
+ norm_method=None,
):
"""
Plot parallel coordinates plot showing posterior points with and without divergences.
@@ -50,10 +52,33 @@
Defaults to .025
ax : axes
Matplotlib axes.
+ norm_method : str
+ Method for normalizing the data. Methods include normal, minmax and rank.
+ Defaults to none.
Returns
-------
ax : matplotlib axes
+
+ Examples
+ --------
+ Plot default parallel plot
+
+ .. plot::
+ :context: close-figs
+
+ >>> import arviz as az
+ >>> data = az.load_arviz_data('centered_eight')
+ >>> az.plot_parallel(data, var_names=["mu", "tau"])
+
+
+ Plot parallel plot with normalization
+
+ .. plot::
+ :context: close-figs
+
+ >>> az.plot_parallel(data, var_names=["mu", "tau"], norm_method='normal')
+
"""
if coords is None:
coords = {}
@@ -69,9 +94,23 @@
var_names, _posterior = xarray_to_ndarray(
get_coords(posterior_data, coords), var_names=var_names, combined=True
)
-
if len(var_names) < 2:
raise ValueError("This plot needs at least two variables")
+ if norm_method is not None:
+ if norm_method == "normal":
+ mean = np.mean(_posterior, axis=1)
+ standard_deviation = np.std(_posterior, axis=1)
+ for i in range(0, np.shape(mean)[0]):
+ _posterior[i, :] = (_posterior[i, :] - mean[i]) / standard_deviation[i]
+ elif norm_method == "minmax":
+ min_elem = np.min(_posterior, axis=1)
+ max_elem = np.max(_posterior, axis=1)
+ for i in range(0, np.shape(min_elem)[0]):
+ _posterior[i, :] = ((_posterior[i, :]) - min_elem[i]) / (max_elem[i] - min_elem[i])
+ elif norm_method == "rank":
+ _posterior = rankdata(_posterior, axis=1)
+ else:
+ raise ValueError("{} is not supported. Use normal, minmax or rank.".format(norm_method))
figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)
| {"golden_diff": "diff --git a/arviz/plots/parallelplot.py b/arviz/plots/parallelplot.py\n--- a/arviz/plots/parallelplot.py\n+++ b/arviz/plots/parallelplot.py\n@@ -2,6 +2,7 @@\n import matplotlib.pyplot as plt\n import numpy as np\n \n+from scipy.stats.mstats import rankdata\n from ..data import convert_to_dataset\n from .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords\n from ..utils import _var_names\n@@ -18,6 +19,7 @@\n colord=\"C1\",\n shadend=0.025,\n ax=None,\n+ norm_method=None,\n ):\n \"\"\"\n Plot parallel coordinates plot showing posterior points with and without divergences.\n@@ -50,10 +52,33 @@\n Defaults to .025\n ax : axes\n Matplotlib axes.\n+ norm_method : str\n+ Method for normalizing the data. Methods include normal, minmax and rank.\n+ Defaults to none.\n \n Returns\n -------\n ax : matplotlib axes\n+\n+ Examples\n+ --------\n+ Plot default parallel plot\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> import arviz as az\n+ >>> data = az.load_arviz_data('centered_eight')\n+ >>> az.plot_parallel(data, var_names=[\"mu\", \"tau\"])\n+\n+\n+ Plot parallel plot with normalization\n+\n+ .. plot::\n+ :context: close-figs\n+\n+ >>> az.plot_parallel(data, var_names=[\"mu\", \"tau\"], norm_method='normal')\n+\n \"\"\"\n if coords is None:\n coords = {}\n@@ -69,9 +94,23 @@\n var_names, _posterior = xarray_to_ndarray(\n get_coords(posterior_data, coords), var_names=var_names, combined=True\n )\n-\n if len(var_names) < 2:\n raise ValueError(\"This plot needs at least two variables\")\n+ if norm_method is not None:\n+ if norm_method == \"normal\":\n+ mean = np.mean(_posterior, axis=1)\n+ standard_deviation = np.std(_posterior, axis=1)\n+ for i in range(0, np.shape(mean)[0]):\n+ _posterior[i, :] = (_posterior[i, :] - mean[i]) / standard_deviation[i]\n+ elif norm_method == \"minmax\":\n+ min_elem = np.min(_posterior, axis=1)\n+ max_elem = np.max(_posterior, axis=1)\n+ for i in range(0, np.shape(min_elem)[0]):\n+ _posterior[i, :] = ((_posterior[i, :]) - min_elem[i]) / (max_elem[i] - min_elem[i])\n+ elif norm_method == \"rank\":\n+ _posterior = rankdata(_posterior, axis=1)\n+ else:\n+ raise ValueError(\"{} is not supported. Use normal, minmax or rank.\".format(norm_method))\n \n figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)\n", "issue": "Allow normalization in plot_parallel\nIt would be great if plot_parallel had a keyword arg `normalize` (or standardize), that centers and rescales the variables before plotting. 
That would make it easier to see things if some posteriors are much more tight than others:\r\n\n", "before_files": [{"content": "\"\"\"Parallel coordinates plot showing posterior points with and without divergences marked.\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom ..data import convert_to_dataset\nfrom .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords\nfrom ..utils import _var_names\n\n\ndef plot_parallel(\n data,\n var_names=None,\n coords=None,\n figsize=None,\n textsize=None,\n legend=True,\n colornd=\"k\",\n colord=\"C1\",\n shadend=0.025,\n ax=None,\n):\n \"\"\"\n Plot parallel coordinates plot showing posterior points with and without divergences.\n\n Described by https://arxiv.org/abs/1709.01449, suggested by Ari Hartikainen\n\n Parameters\n ----------\n data : obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names : list of variable names\n Variables to be plotted, if None all variable are plotted. Can be used to change the order\n of the plotted variables\n coords : mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n figsize : tuple\n Figure size. If None it will be defined automatically.\n textsize: float\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on figsize.\n legend : bool\n Flag for plotting legend (defaults to True)\n colornd : valid matplotlib color\n color for non-divergent points. Defaults to 'k'\n colord : valid matplotlib color\n color for divergent points. Defaults to 'C1'\n shadend : float\n Alpha blending value for non-divergent points, between 0 (invisible) and 1 (opaque).\n Defaults to .025\n ax : axes\n Matplotlib axes.\n\n Returns\n -------\n ax : matplotlib axes\n \"\"\"\n if coords is None:\n coords = {}\n\n # Get diverging draws and combine chains\n divergent_data = convert_to_dataset(data, group=\"sample_stats\")\n _, diverging_mask = xarray_to_ndarray(divergent_data, var_names=(\"diverging\",), combined=True)\n diverging_mask = np.squeeze(diverging_mask)\n\n # Get posterior draws and combine chains\n posterior_data = convert_to_dataset(data, group=\"posterior\")\n var_names = _var_names(var_names, posterior_data)\n var_names, _posterior = xarray_to_ndarray(\n get_coords(posterior_data, coords), var_names=var_names, combined=True\n )\n\n if len(var_names) < 2:\n raise ValueError(\"This plot needs at least two variables\")\n\n figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)\n\n if ax is None:\n _, ax = plt.subplots(figsize=figsize, constrained_layout=True)\n\n ax.plot(_posterior[:, ~diverging_mask], color=colornd, alpha=shadend)\n\n if np.any(diverging_mask):\n ax.plot(_posterior[:, diverging_mask], color=colord, lw=1)\n\n ax.tick_params(labelsize=textsize)\n ax.set_xticks(range(len(var_names)))\n ax.set_xticklabels(var_names)\n\n if legend:\n ax.plot([], color=colornd, label=\"non-divergent\")\n if np.any(diverging_mask):\n ax.plot([], color=colord, label=\"divergent\")\n ax.legend(fontsize=xt_labelsize)\n\n return ax\n", "path": "arviz/plots/parallelplot.py"}]} | 1,631 | 702 |
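The golden diff adds a `norm_method` argument accepting `"normal"`, `"minmax"` or `"rank"`. Typical usage after the patch, mirroring the docstring examples added in the diff:

```python
import arviz as az

data = az.load_arviz_data("centered_eight")

# Default parallel plot, no rescaling.
az.plot_parallel(data, var_names=["mu", "tau"])

# Standardize each variable (zero mean, unit variance) before plotting, so
# tight and wide posteriors share a comparable vertical scale.
az.plot_parallel(data, var_names=["mu", "tau"], norm_method="normal")

# Alternatives added by the patch: "minmax" and "rank".
az.plot_parallel(data, var_names=["mu", "tau"], norm_method="minmax")
```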
gh_patches_debug_30482 | rasdani/github-patches | git_diff | SeldonIO__MLServer-605 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add docker build option to not use cache
To ensure fresh environment and while potentially developing a new model version users will not want to use cached layers in the docker build for their image.
In docker this is the --no-cache option, I would make it a boolean option within the cli and also add the --rm option (although default is true might be worth making sure) to remove any intermediate containers after success to again ensure a clean environment in CI/CD as well as locally.
</issue>
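No golden diff is shown for this row in the preview, so the snippet below is only an illustrative sketch of how the requested flags could be wired into the click CLI listed next; the option name and plumbing are assumptions, not the actual MLServer patch:

```python
import click

# Hypothetical wiring for the build command, mirroring the docker CLI flags
# --no-cache (ignore cached layers) and --rm (remove intermediate containers).
@click.command("build")
@click.argument("folder", nargs=1)
@click.option("-t", "--tag", type=str)
@click.option("--no-cache", is_flag=True, default=False)
def build(folder: str, tag: str, no_cache: bool) -> None:
    extra = "--no-cache --rm" if no_cache else "--rm"
    build_cmd = f"docker build {extra} {folder} -t {tag}"
    # The real command would pass this to subprocess.run with DOCKER_BUILDKIT=1;
    # here we just print it to show the resulting invocation.
    click.echo(build_cmd)

if __name__ == "__main__":
    build()
```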
<code>
[start of mlserver/cli/build.py]
1 import subprocess
2 import os
3
4 from tempfile import TemporaryDirectory
5
6 from .. import __version__
7 from ..logging import logger
8
9 from .constants import (
10 DockerfileName,
11 DockerfileTemplate,
12 DockerignoreName,
13 Dockerignore,
14 )
15
16
17 def generate_dockerfile() -> str:
18 return DockerfileTemplate.format(version=__version__)
19
20
21 def write_dockerfile(
22 folder: str, dockerfile: str, include_dockerignore: bool = True
23 ) -> str:
24 dockerfile_path = os.path.join(folder, DockerfileName)
25 with open(dockerfile_path, "w") as dockerfile_handler:
26 logger.info(f"Writing Dockerfile in {dockerfile_path}")
27 dockerfile_handler.write(dockerfile)
28
29 if include_dockerignore:
30 # Point to our own .dockerignore
31 # https://docs.docker.com/engine/reference/commandline/build/#use-a-dockerignore-file
32 dockerignore_path = dockerfile_path + DockerignoreName
33 with open(dockerignore_path, "w") as dockerignore_handler:
34 logger.info(f"Writing .dockerignore in {dockerignore_path}")
35 dockerignore_handler.write(Dockerignore)
36
37 return dockerfile_path
38
39
40 def build_image(folder: str, dockerfile: str, image_tag: str) -> str:
41 logger.info(f"Building Docker image with tag {image_tag}")
42 with TemporaryDirectory() as tmp_dir:
43 dockerfile_path = write_dockerfile(tmp_dir, dockerfile)
44
45 build_cmd = f"docker build {folder} -f {dockerfile_path} -t {image_tag}"
46 build_env = os.environ.copy()
47 build_env["DOCKER_BUILDKIT"] = "1"
48 subprocess.run(build_cmd, check=True, shell=True, env=build_env)
49
50 return image_tag
51
[end of mlserver/cli/build.py]
[start of mlserver/cli/main.py]
1 """
2 Command-line interface to manage MLServer models.
3 """
4 import click
5 import asyncio
6
7 from functools import wraps
8
9 from ..server import MLServer
10 from ..logging import logger, configure_logger
11 from ..utils import install_uvloop_event_loop
12
13 from .build import generate_dockerfile, build_image, write_dockerfile
14 from .serve import load_settings
15
16
17 def click_async(f):
18 @wraps(f)
19 def wrapper(*args, **kwargs):
20 return asyncio.run(f(*args, **kwargs))
21
22 return wrapper
23
24
25 @click.group()
26 @click.version_option()
27 def root():
28 """
29 Command-line interface to manage MLServer models.
30 """
31 pass
32
33
34 @root.command("start")
35 @click.argument("folder", nargs=1)
36 @click_async
37 async def start(folder: str):
38 """
39 Start serving a machine learning model with MLServer.
40 """
41 settings, models_settings = await load_settings(folder)
42
43 server = MLServer(settings)
44 await server.start(models_settings)
45
46
47 @root.command("build")
48 @click.argument("folder", nargs=1)
49 @click.option("-t", "--tag", type=str)
50 @click_async
51 async def build(folder: str, tag: str):
52 """
53 Build a Docker image for a custom MLServer runtime.
54 """
55 dockerfile = generate_dockerfile()
56 build_image(folder, dockerfile, tag)
57 logger.info(f"Successfully built custom Docker image with tag {tag}")
58
59
60 @root.command("dockerfile")
61 @click.argument("folder", nargs=1)
62 @click.option("-i", "--include-dockerignore", is_flag=True)
63 @click_async
64 async def dockerfile(folder: str, include_dockerignore: bool):
65 """
66 Generate a Dockerfile
67 """
68 dockerfile = generate_dockerfile()
69 dockerfile_path = write_dockerfile(
70 folder, dockerfile, include_dockerignore=include_dockerignore
71 )
72 logger.info(f"Successfully written Dockerfile in {dockerfile_path}")
73
74
75 def main():
76 configure_logger()
77 install_uvloop_event_loop()
78 root()
79
80
81 if __name__ == "__main__":
82 main()
83
[end of mlserver/cli/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlserver/cli/build.py b/mlserver/cli/build.py
--- a/mlserver/cli/build.py
+++ b/mlserver/cli/build.py
@@ -37,12 +37,18 @@
return dockerfile_path
-def build_image(folder: str, dockerfile: str, image_tag: str) -> str:
+def build_image(
+ folder: str, dockerfile: str, image_tag: str, no_cache: bool = False
+) -> str:
logger.info(f"Building Docker image with tag {image_tag}")
+ _docker_command_prefix = "docker build --rm "
with TemporaryDirectory() as tmp_dir:
dockerfile_path = write_dockerfile(tmp_dir, dockerfile)
-
- build_cmd = f"docker build {folder} -f {dockerfile_path} -t {image_tag}"
+ _docker_command_suffix = f"{folder} -f {dockerfile_path} -t {image_tag}"
+ if no_cache:
+ build_cmd = _docker_command_prefix + "--no-cache " + _docker_command_suffix
+ else:
+ build_cmd = _docker_command_prefix + _docker_command_suffix
build_env = os.environ.copy()
build_env["DOCKER_BUILDKIT"] = "1"
subprocess.run(build_cmd, check=True, shell=True, env=build_env)
diff --git a/mlserver/cli/main.py b/mlserver/cli/main.py
--- a/mlserver/cli/main.py
+++ b/mlserver/cli/main.py
@@ -47,13 +47,14 @@
@root.command("build")
@click.argument("folder", nargs=1)
@click.option("-t", "--tag", type=str)
[email protected]("--no-cache", default=False, is_flag=True)
@click_async
-async def build(folder: str, tag: str):
+async def build(folder: str, tag: str, no_cache: bool = False):
"""
Build a Docker image for a custom MLServer runtime.
"""
dockerfile = generate_dockerfile()
- build_image(folder, dockerfile, tag)
+ build_image(folder, dockerfile, tag, no_cache=no_cache)
logger.info(f"Successfully built custom Docker image with tag {tag}")
| {"golden_diff": "diff --git a/mlserver/cli/build.py b/mlserver/cli/build.py\n--- a/mlserver/cli/build.py\n+++ b/mlserver/cli/build.py\n@@ -37,12 +37,18 @@\n return dockerfile_path\n \n \n-def build_image(folder: str, dockerfile: str, image_tag: str) -> str:\n+def build_image(\n+ folder: str, dockerfile: str, image_tag: str, no_cache: bool = False\n+) -> str:\n logger.info(f\"Building Docker image with tag {image_tag}\")\n+ _docker_command_prefix = \"docker build --rm \"\n with TemporaryDirectory() as tmp_dir:\n dockerfile_path = write_dockerfile(tmp_dir, dockerfile)\n-\n- build_cmd = f\"docker build {folder} -f {dockerfile_path} -t {image_tag}\"\n+ _docker_command_suffix = f\"{folder} -f {dockerfile_path} -t {image_tag}\"\n+ if no_cache:\n+ build_cmd = _docker_command_prefix + \"--no-cache \" + _docker_command_suffix\n+ else:\n+ build_cmd = _docker_command_prefix + _docker_command_suffix\n build_env = os.environ.copy()\n build_env[\"DOCKER_BUILDKIT\"] = \"1\"\n subprocess.run(build_cmd, check=True, shell=True, env=build_env)\ndiff --git a/mlserver/cli/main.py b/mlserver/cli/main.py\n--- a/mlserver/cli/main.py\n+++ b/mlserver/cli/main.py\n@@ -47,13 +47,14 @@\n @root.command(\"build\")\n @click.argument(\"folder\", nargs=1)\n @click.option(\"-t\", \"--tag\", type=str)\[email protected](\"--no-cache\", default=False, is_flag=True)\n @click_async\n-async def build(folder: str, tag: str):\n+async def build(folder: str, tag: str, no_cache: bool = False):\n \"\"\"\n Build a Docker image for a custom MLServer runtime.\n \"\"\"\n dockerfile = generate_dockerfile()\n- build_image(folder, dockerfile, tag)\n+ build_image(folder, dockerfile, tag, no_cache=no_cache)\n logger.info(f\"Successfully built custom Docker image with tag {tag}\")\n", "issue": "Add docker build option to not use cache\nTo ensure fresh environment and while potentially developing a new model version users will not want to use cached layers in the docker build for their image.\r\n\r\nIn docker this is the --no-cache option, I would make it a boolean option within the cli and also add the --rm option (although default is true might be worth making sure) to remove any intermediate containers after success to again ensure a clean environment in CI/CD as well as locally. \n", "before_files": [{"content": "import subprocess\nimport os\n\nfrom tempfile import TemporaryDirectory\n\nfrom .. 
import __version__\nfrom ..logging import logger\n\nfrom .constants import (\n DockerfileName,\n DockerfileTemplate,\n DockerignoreName,\n Dockerignore,\n)\n\n\ndef generate_dockerfile() -> str:\n return DockerfileTemplate.format(version=__version__)\n\n\ndef write_dockerfile(\n folder: str, dockerfile: str, include_dockerignore: bool = True\n) -> str:\n dockerfile_path = os.path.join(folder, DockerfileName)\n with open(dockerfile_path, \"w\") as dockerfile_handler:\n logger.info(f\"Writing Dockerfile in {dockerfile_path}\")\n dockerfile_handler.write(dockerfile)\n\n if include_dockerignore:\n # Point to our own .dockerignore\n # https://docs.docker.com/engine/reference/commandline/build/#use-a-dockerignore-file\n dockerignore_path = dockerfile_path + DockerignoreName\n with open(dockerignore_path, \"w\") as dockerignore_handler:\n logger.info(f\"Writing .dockerignore in {dockerignore_path}\")\n dockerignore_handler.write(Dockerignore)\n\n return dockerfile_path\n\n\ndef build_image(folder: str, dockerfile: str, image_tag: str) -> str:\n logger.info(f\"Building Docker image with tag {image_tag}\")\n with TemporaryDirectory() as tmp_dir:\n dockerfile_path = write_dockerfile(tmp_dir, dockerfile)\n\n build_cmd = f\"docker build {folder} -f {dockerfile_path} -t {image_tag}\"\n build_env = os.environ.copy()\n build_env[\"DOCKER_BUILDKIT\"] = \"1\"\n subprocess.run(build_cmd, check=True, shell=True, env=build_env)\n\n return image_tag\n", "path": "mlserver/cli/build.py"}, {"content": "\"\"\"\nCommand-line interface to manage MLServer models.\n\"\"\"\nimport click\nimport asyncio\n\nfrom functools import wraps\n\nfrom ..server import MLServer\nfrom ..logging import logger, configure_logger\nfrom ..utils import install_uvloop_event_loop\n\nfrom .build import generate_dockerfile, build_image, write_dockerfile\nfrom .serve import load_settings\n\n\ndef click_async(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n return asyncio.run(f(*args, **kwargs))\n\n return wrapper\n\n\[email protected]()\[email protected]_option()\ndef root():\n \"\"\"\n Command-line interface to manage MLServer models.\n \"\"\"\n pass\n\n\[email protected](\"start\")\[email protected](\"folder\", nargs=1)\n@click_async\nasync def start(folder: str):\n \"\"\"\n Start serving a machine learning model with MLServer.\n \"\"\"\n settings, models_settings = await load_settings(folder)\n\n server = MLServer(settings)\n await server.start(models_settings)\n\n\[email protected](\"build\")\[email protected](\"folder\", nargs=1)\[email protected](\"-t\", \"--tag\", type=str)\n@click_async\nasync def build(folder: str, tag: str):\n \"\"\"\n Build a Docker image for a custom MLServer runtime.\n \"\"\"\n dockerfile = generate_dockerfile()\n build_image(folder, dockerfile, tag)\n logger.info(f\"Successfully built custom Docker image with tag {tag}\")\n\n\[email protected](\"dockerfile\")\[email protected](\"folder\", nargs=1)\[email protected](\"-i\", \"--include-dockerignore\", is_flag=True)\n@click_async\nasync def dockerfile(folder: str, include_dockerignore: bool):\n \"\"\"\n Generate a Dockerfile\n \"\"\"\n dockerfile = generate_dockerfile()\n dockerfile_path = write_dockerfile(\n folder, dockerfile, include_dockerignore=include_dockerignore\n )\n logger.info(f\"Successfully written Dockerfile in {dockerfile_path}\")\n\n\ndef main():\n configure_logger()\n install_uvloop_event_loop()\n root()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "mlserver/cli/main.py"}]} | 1,738 | 479 |
gh_patches_debug_30463 | rasdani/github-patches | git_diff | psf__black-3217 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Preview option support for blackd
It seems that **blackd** does not currently support [the preview option](https://black.readthedocs.io/en/stable/the_black_code_style/future_style.html?highlight=preview#preview-style).
I suggest implementing an option for it, as is done for other features: https://black.readthedocs.io/en/stable/usage_and_configuration/black_as_a_server.html#protocol
Something like `X-PREVIEW` would be fine.
I faced this while using **[intellij-blackconnect](https://github.com/lensvol/intellij-blackconnect/)** and there is an issue about that too https://github.com/lensvol/intellij-blackconnect/issues/37
</issue>
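As a rough sketch of the proposed protocol extension (assuming a black release whose `FileMode`/`Mode` exposes a `preview` flag), a handler could map an `X-Preview` request header onto the formatting mode roughly like this. The header name is the suggestion above, not an existing API, and this is not blackd's actual handler code.

```python
# Sketch only: derive a black FileMode from an "X-Preview" style request header.
import black

PREVIEW_HEADER = "X-Preview"


def mode_from_headers(headers: dict) -> black.FileMode:
    # Treat any non-empty header value as "enable preview style".
    preview = bool(headers.get(PREVIEW_HEADER, False))
    return black.FileMode(preview=preview)


if __name__ == "__main__":
    mode = mode_from_headers({"X-Preview": "true"})
    print(black.format_str("x = {'a':  1}", mode=mode))
```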
<code>
[start of src/blackd/__init__.py]
1 import asyncio
2 import logging
3 from concurrent.futures import Executor, ProcessPoolExecutor
4 from datetime import datetime
5 from functools import partial
6 from multiprocessing import freeze_support
7 from typing import Set, Tuple
8
9 try:
10 from aiohttp import web
11
12 from .middlewares import cors
13 except ImportError as ie:
14 raise ImportError(
15 f"aiohttp dependency is not installed: {ie}. "
16 + "Please re-install black with the '[d]' extra install "
17 + "to obtain aiohttp_cors: `pip install black[d]`"
18 ) from None
19
20 import click
21
22 import black
23 from _black_version import version as __version__
24 from black.concurrency import maybe_install_uvloop
25
26 # This is used internally by tests to shut down the server prematurely
27 _stop_signal = asyncio.Event()
28
29 # Request headers
30 PROTOCOL_VERSION_HEADER = "X-Protocol-Version"
31 LINE_LENGTH_HEADER = "X-Line-Length"
32 PYTHON_VARIANT_HEADER = "X-Python-Variant"
33 SKIP_STRING_NORMALIZATION_HEADER = "X-Skip-String-Normalization"
34 SKIP_MAGIC_TRAILING_COMMA = "X-Skip-Magic-Trailing-Comma"
35 FAST_OR_SAFE_HEADER = "X-Fast-Or-Safe"
36 DIFF_HEADER = "X-Diff"
37
38 BLACK_HEADERS = [
39 PROTOCOL_VERSION_HEADER,
40 LINE_LENGTH_HEADER,
41 PYTHON_VARIANT_HEADER,
42 SKIP_STRING_NORMALIZATION_HEADER,
43 SKIP_MAGIC_TRAILING_COMMA,
44 FAST_OR_SAFE_HEADER,
45 DIFF_HEADER,
46 ]
47
48 # Response headers
49 BLACK_VERSION_HEADER = "X-Black-Version"
50
51
52 class InvalidVariantHeader(Exception):
53 pass
54
55
56 @click.command(context_settings={"help_option_names": ["-h", "--help"]})
57 @click.option(
58 "--bind-host", type=str, help="Address to bind the server to.", default="localhost"
59 )
60 @click.option("--bind-port", type=int, help="Port to listen on", default=45484)
61 @click.version_option(version=black.__version__)
62 def main(bind_host: str, bind_port: int) -> None:
63 logging.basicConfig(level=logging.INFO)
64 app = make_app()
65 ver = black.__version__
66 black.out(f"blackd version {ver} listening on {bind_host} port {bind_port}")
67 web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)
68
69
70 def make_app() -> web.Application:
71 app = web.Application(
72 middlewares=[cors(allow_headers=(*BLACK_HEADERS, "Content-Type"))]
73 )
74 executor = ProcessPoolExecutor()
75 app.add_routes([web.post("/", partial(handle, executor=executor))])
76 return app
77
78
79 async def handle(request: web.Request, executor: Executor) -> web.Response:
80 headers = {BLACK_VERSION_HEADER: __version__}
81 try:
82 if request.headers.get(PROTOCOL_VERSION_HEADER, "1") != "1":
83 return web.Response(
84 status=501, text="This server only supports protocol version 1"
85 )
86 try:
87 line_length = int(
88 request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH)
89 )
90 except ValueError:
91 return web.Response(status=400, text="Invalid line length header value")
92
93 if PYTHON_VARIANT_HEADER in request.headers:
94 value = request.headers[PYTHON_VARIANT_HEADER]
95 try:
96 pyi, versions = parse_python_variant_header(value)
97 except InvalidVariantHeader as e:
98 return web.Response(
99 status=400,
100 text=f"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}",
101 )
102 else:
103 pyi = False
104 versions = set()
105
106 skip_string_normalization = bool(
107 request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False)
108 )
109 skip_magic_trailing_comma = bool(
110 request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False)
111 )
112 fast = False
113 if request.headers.get(FAST_OR_SAFE_HEADER, "safe") == "fast":
114 fast = True
115 mode = black.FileMode(
116 target_versions=versions,
117 is_pyi=pyi,
118 line_length=line_length,
119 string_normalization=not skip_string_normalization,
120 magic_trailing_comma=not skip_magic_trailing_comma,
121 )
122 req_bytes = await request.content.read()
123 charset = request.charset if request.charset is not None else "utf8"
124 req_str = req_bytes.decode(charset)
125 then = datetime.utcnow()
126
127 loop = asyncio.get_event_loop()
128 formatted_str = await loop.run_in_executor(
129 executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)
130 )
131
132 # Only output the diff in the HTTP response
133 only_diff = bool(request.headers.get(DIFF_HEADER, False))
134 if only_diff:
135 now = datetime.utcnow()
136 src_name = f"In\t{then} +0000"
137 dst_name = f"Out\t{now} +0000"
138 loop = asyncio.get_event_loop()
139 formatted_str = await loop.run_in_executor(
140 executor,
141 partial(black.diff, req_str, formatted_str, src_name, dst_name),
142 )
143
144 return web.Response(
145 content_type=request.content_type,
146 charset=charset,
147 headers=headers,
148 text=formatted_str,
149 )
150 except black.NothingChanged:
151 return web.Response(status=204, headers=headers)
152 except black.InvalidInput as e:
153 return web.Response(status=400, headers=headers, text=str(e))
154 except Exception as e:
155 logging.exception("Exception during handling a request")
156 return web.Response(status=500, headers=headers, text=str(e))
157
158
159 def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:
160 if value == "pyi":
161 return True, set()
162 else:
163 versions = set()
164 for version in value.split(","):
165 if version.startswith("py"):
166 version = version[len("py") :]
167 if "." in version:
168 major_str, *rest = version.split(".")
169 else:
170 major_str = version[0]
171 rest = [version[1:]] if len(version) > 1 else []
172 try:
173 major = int(major_str)
174 if major not in (2, 3):
175 raise InvalidVariantHeader("major version must be 2 or 3")
176 if len(rest) > 0:
177 minor = int(rest[0])
178 if major == 2:
179 raise InvalidVariantHeader("Python 2 is not supported")
180 else:
181 # Default to lowest supported minor version.
182 minor = 7 if major == 2 else 3
183 version_str = f"PY{major}{minor}"
184 if major == 3 and not hasattr(black.TargetVersion, version_str):
185 raise InvalidVariantHeader(f"3.{minor} is not supported")
186 versions.add(black.TargetVersion[version_str])
187 except (KeyError, ValueError):
188 raise InvalidVariantHeader("expected e.g. '3.7', 'py3.5'") from None
189 return False, versions
190
191
192 def patched_main() -> None:
193 maybe_install_uvloop()
194 freeze_support()
195 black.patch_click()
196 main()
197
198
199 if __name__ == "__main__":
200 patched_main()
201
[end of src/blackd/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/blackd/__init__.py b/src/blackd/__init__.py
--- a/src/blackd/__init__.py
+++ b/src/blackd/__init__.py
@@ -32,6 +32,7 @@
PYTHON_VARIANT_HEADER = "X-Python-Variant"
SKIP_STRING_NORMALIZATION_HEADER = "X-Skip-String-Normalization"
SKIP_MAGIC_TRAILING_COMMA = "X-Skip-Magic-Trailing-Comma"
+PREVIEW = "X-Preview"
FAST_OR_SAFE_HEADER = "X-Fast-Or-Safe"
DIFF_HEADER = "X-Diff"
@@ -41,6 +42,7 @@
PYTHON_VARIANT_HEADER,
SKIP_STRING_NORMALIZATION_HEADER,
SKIP_MAGIC_TRAILING_COMMA,
+ PREVIEW,
FAST_OR_SAFE_HEADER,
DIFF_HEADER,
]
@@ -109,6 +111,7 @@
skip_magic_trailing_comma = bool(
request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False)
)
+ preview = bool(request.headers.get(PREVIEW, False))
fast = False
if request.headers.get(FAST_OR_SAFE_HEADER, "safe") == "fast":
fast = True
@@ -118,6 +121,7 @@
line_length=line_length,
string_normalization=not skip_string_normalization,
magic_trailing_comma=not skip_magic_trailing_comma,
+ preview=preview,
)
req_bytes = await request.content.read()
charset = request.charset if request.charset is not None else "utf8"
| {"golden_diff": "diff --git a/src/blackd/__init__.py b/src/blackd/__init__.py\n--- a/src/blackd/__init__.py\n+++ b/src/blackd/__init__.py\n@@ -32,6 +32,7 @@\n PYTHON_VARIANT_HEADER = \"X-Python-Variant\"\n SKIP_STRING_NORMALIZATION_HEADER = \"X-Skip-String-Normalization\"\n SKIP_MAGIC_TRAILING_COMMA = \"X-Skip-Magic-Trailing-Comma\"\n+PREVIEW = \"X-Preview\"\n FAST_OR_SAFE_HEADER = \"X-Fast-Or-Safe\"\n DIFF_HEADER = \"X-Diff\"\n \n@@ -41,6 +42,7 @@\n PYTHON_VARIANT_HEADER,\n SKIP_STRING_NORMALIZATION_HEADER,\n SKIP_MAGIC_TRAILING_COMMA,\n+ PREVIEW,\n FAST_OR_SAFE_HEADER,\n DIFF_HEADER,\n ]\n@@ -109,6 +111,7 @@\n skip_magic_trailing_comma = bool(\n request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False)\n )\n+ preview = bool(request.headers.get(PREVIEW, False))\n fast = False\n if request.headers.get(FAST_OR_SAFE_HEADER, \"safe\") == \"fast\":\n fast = True\n@@ -118,6 +121,7 @@\n line_length=line_length,\n string_normalization=not skip_string_normalization,\n magic_trailing_comma=not skip_magic_trailing_comma,\n+ preview=preview,\n )\n req_bytes = await request.content.read()\n charset = request.charset if request.charset is not None else \"utf8\"\n", "issue": "Preview option support for blackd\nIt seems that **blackd** does not support [the preview option](https://black.readthedocs.io/en/stable/the_black_code_style/future_style.html?highlight=preview#preview-style) now.\r\n\r\nI suggest implementing an option for that like it is done for other features https://black.readthedocs.io/en/stable/usage_and_configuration/black_as_a_server.html#protocol\r\n\r\nSomething like `X-PREVIEW` would be fine.\r\n\r\nI faced this while using **[intellij-blackconnect](https://github.com/lensvol/intellij-blackconnect/)** and there is an issue about that too https://github.com/lensvol/intellij-blackconnect/issues/37\n", "before_files": [{"content": "import asyncio\nimport logging\nfrom concurrent.futures import Executor, ProcessPoolExecutor\nfrom datetime import datetime\nfrom functools import partial\nfrom multiprocessing import freeze_support\nfrom typing import Set, Tuple\n\ntry:\n from aiohttp import web\n\n from .middlewares import cors\nexcept ImportError as ie:\n raise ImportError(\n f\"aiohttp dependency is not installed: {ie}. 
\"\n + \"Please re-install black with the '[d]' extra install \"\n + \"to obtain aiohttp_cors: `pip install black[d]`\"\n ) from None\n\nimport click\n\nimport black\nfrom _black_version import version as __version__\nfrom black.concurrency import maybe_install_uvloop\n\n# This is used internally by tests to shut down the server prematurely\n_stop_signal = asyncio.Event()\n\n# Request headers\nPROTOCOL_VERSION_HEADER = \"X-Protocol-Version\"\nLINE_LENGTH_HEADER = \"X-Line-Length\"\nPYTHON_VARIANT_HEADER = \"X-Python-Variant\"\nSKIP_STRING_NORMALIZATION_HEADER = \"X-Skip-String-Normalization\"\nSKIP_MAGIC_TRAILING_COMMA = \"X-Skip-Magic-Trailing-Comma\"\nFAST_OR_SAFE_HEADER = \"X-Fast-Or-Safe\"\nDIFF_HEADER = \"X-Diff\"\n\nBLACK_HEADERS = [\n PROTOCOL_VERSION_HEADER,\n LINE_LENGTH_HEADER,\n PYTHON_VARIANT_HEADER,\n SKIP_STRING_NORMALIZATION_HEADER,\n SKIP_MAGIC_TRAILING_COMMA,\n FAST_OR_SAFE_HEADER,\n DIFF_HEADER,\n]\n\n# Response headers\nBLACK_VERSION_HEADER = \"X-Black-Version\"\n\n\nclass InvalidVariantHeader(Exception):\n pass\n\n\[email protected](context_settings={\"help_option_names\": [\"-h\", \"--help\"]})\[email protected](\n \"--bind-host\", type=str, help=\"Address to bind the server to.\", default=\"localhost\"\n)\[email protected](\"--bind-port\", type=int, help=\"Port to listen on\", default=45484)\[email protected]_option(version=black.__version__)\ndef main(bind_host: str, bind_port: int) -> None:\n logging.basicConfig(level=logging.INFO)\n app = make_app()\n ver = black.__version__\n black.out(f\"blackd version {ver} listening on {bind_host} port {bind_port}\")\n web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None)\n\n\ndef make_app() -> web.Application:\n app = web.Application(\n middlewares=[cors(allow_headers=(*BLACK_HEADERS, \"Content-Type\"))]\n )\n executor = ProcessPoolExecutor()\n app.add_routes([web.post(\"/\", partial(handle, executor=executor))])\n return app\n\n\nasync def handle(request: web.Request, executor: Executor) -> web.Response:\n headers = {BLACK_VERSION_HEADER: __version__}\n try:\n if request.headers.get(PROTOCOL_VERSION_HEADER, \"1\") != \"1\":\n return web.Response(\n status=501, text=\"This server only supports protocol version 1\"\n )\n try:\n line_length = int(\n request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH)\n )\n except ValueError:\n return web.Response(status=400, text=\"Invalid line length header value\")\n\n if PYTHON_VARIANT_HEADER in request.headers:\n value = request.headers[PYTHON_VARIANT_HEADER]\n try:\n pyi, versions = parse_python_variant_header(value)\n except InvalidVariantHeader as e:\n return web.Response(\n status=400,\n text=f\"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}\",\n )\n else:\n pyi = False\n versions = set()\n\n skip_string_normalization = bool(\n request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False)\n )\n skip_magic_trailing_comma = bool(\n request.headers.get(SKIP_MAGIC_TRAILING_COMMA, False)\n )\n fast = False\n if request.headers.get(FAST_OR_SAFE_HEADER, \"safe\") == \"fast\":\n fast = True\n mode = black.FileMode(\n target_versions=versions,\n is_pyi=pyi,\n line_length=line_length,\n string_normalization=not skip_string_normalization,\n magic_trailing_comma=not skip_magic_trailing_comma,\n )\n req_bytes = await request.content.read()\n charset = request.charset if request.charset is not None else \"utf8\"\n req_str = req_bytes.decode(charset)\n then = datetime.utcnow()\n\n loop = asyncio.get_event_loop()\n formatted_str = await 
loop.run_in_executor(\n executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode)\n )\n\n # Only output the diff in the HTTP response\n only_diff = bool(request.headers.get(DIFF_HEADER, False))\n if only_diff:\n now = datetime.utcnow()\n src_name = f\"In\\t{then} +0000\"\n dst_name = f\"Out\\t{now} +0000\"\n loop = asyncio.get_event_loop()\n formatted_str = await loop.run_in_executor(\n executor,\n partial(black.diff, req_str, formatted_str, src_name, dst_name),\n )\n\n return web.Response(\n content_type=request.content_type,\n charset=charset,\n headers=headers,\n text=formatted_str,\n )\n except black.NothingChanged:\n return web.Response(status=204, headers=headers)\n except black.InvalidInput as e:\n return web.Response(status=400, headers=headers, text=str(e))\n except Exception as e:\n logging.exception(\"Exception during handling a request\")\n return web.Response(status=500, headers=headers, text=str(e))\n\n\ndef parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]:\n if value == \"pyi\":\n return True, set()\n else:\n versions = set()\n for version in value.split(\",\"):\n if version.startswith(\"py\"):\n version = version[len(\"py\") :]\n if \".\" in version:\n major_str, *rest = version.split(\".\")\n else:\n major_str = version[0]\n rest = [version[1:]] if len(version) > 1 else []\n try:\n major = int(major_str)\n if major not in (2, 3):\n raise InvalidVariantHeader(\"major version must be 2 or 3\")\n if len(rest) > 0:\n minor = int(rest[0])\n if major == 2:\n raise InvalidVariantHeader(\"Python 2 is not supported\")\n else:\n # Default to lowest supported minor version.\n minor = 7 if major == 2 else 3\n version_str = f\"PY{major}{minor}\"\n if major == 3 and not hasattr(black.TargetVersion, version_str):\n raise InvalidVariantHeader(f\"3.{minor} is not supported\")\n versions.add(black.TargetVersion[version_str])\n except (KeyError, ValueError):\n raise InvalidVariantHeader(\"expected e.g. '3.7', 'py3.5'\") from None\n return False, versions\n\n\ndef patched_main() -> None:\n maybe_install_uvloop()\n freeze_support()\n black.patch_click()\n main()\n\n\nif __name__ == \"__main__\":\n patched_main()\n", "path": "src/blackd/__init__.py"}]} | 2,742 | 341 |
gh_patches_debug_56767 | rasdani/github-patches | git_diff | DataDog__dd-agent-2387 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[iis] Non-digit value in WMI metric name
Getting this warning in v5.7.1 and `iis.site_up` is permanently triggered.
```
2016-03-16 14:57:40 GMT Standard Time | WARNING | checks.iis(iis.py:127) | When extracting metrics with WMI, found a non digit value for property 'name'.
```
This was working fine in v5.6.1
</issue>
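The warning itself is easy to reproduce outside the agent: the metric-extraction loop passes every WMI property, including the string-valued `Name`, through `float()`, so non-numeric values hit the `ValueError` branch and log the message. The snippet below only reproduces that symptom for illustration; the check's actual resolution may differ.

```python
# Standalone illustration of the warning's cause; not the shipped fix.
def extract_numeric_properties(wmi_obj: dict) -> dict:
    metrics = {}
    for prop, value in wmi_obj.items():
        try:
            metrics[prop] = float(value)
        except (TypeError, ValueError):
            # "Name" (e.g. "Default Web Site") lands here and triggers the log warning.
            continue
    return metrics


print(extract_numeric_properties({"Name": "Default Web Site", "CurrentConnections": "3"}))
```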
<code>
[start of checks.d/iis.py]
1 '''
2 Check the performance counters from IIS
3 '''
4 # 3p
5 import pythoncom
6
7 # project
8 from checks import AgentCheck
9 from checks.wmi_check import WinWMICheck, WMIMetric
10 from config import _is_affirmative
11 from utils.containers import hash_mutable
12 from utils.timeout import TimeoutException
13
14
15 class IIS(WinWMICheck):
16 METRICS = [
17 ('ServiceUptime', 'iis.uptime', 'gauge'),
18
19 # Network
20 ('TotalBytesSent','iis.net.bytes_sent', 'rate'),
21 ('TotalBytesReceived', 'iis.net.bytes_rcvd', 'rate'),
22 ('TotalBytesTransferred', 'iis.net.bytes_total', 'rate'),
23 ('CurrentConnections', 'iis.net.num_connections', 'gauge'),
24 ('TotalFilesSent', 'iis.net.files_sent', 'rate'),
25 ('TotalFilesReceived', 'iis.net.files_rcvd', 'rate'),
26 ('TotalConnectionAttemptsAllInstances', 'iis.net.connection_attempts', 'rate'),
27
28 # HTTP Methods
29 ('TotalGetRequests', 'iis.httpd_request_method.get', 'rate'),
30 ('TotalPostRequests', 'iis.httpd_request_method.post', 'rate'),
31 ('TotalHeadRequests', 'iis.httpd_request_method.head', 'rate'),
32 ('TotalPutRequests', 'iis.httpd_request_method.put', 'rate'),
33 ('TotalDeleteRequests', 'iis.httpd_request_method.delete', 'rate'),
34 ('TotalOptionsRequests', 'iis.httpd_request_method.options', 'rate'),
35 ('TotalTraceRequests', 'iis.httpd_request_method.trace', 'rate'),
36
37 # Errors
38 ('TotalNotFoundErrors', 'iis.errors.not_found', 'rate'),
39 ('TotalLockedErrors', 'iis.errors.locked', 'rate'),
40
41 # Users
42 ('TotalAnonymousUsers', 'iis.users.anon', 'rate'),
43 ('TotalNonAnonymousUsers', 'iis.users.nonanon', 'rate'),
44
45 # Requests
46 ('TotalCGIRequests', 'iis.requests.cgi', 'rate'),
47 ('TotalISAPIExtensionRequests', 'iis.requests.isapi', 'rate'),
48 ]
49 SERVICE_CHECK = "iis.site_up"
50
51 NAMESPACE = "root\\CIMV2"
52 CLASS = "Win32_PerfFormattedData_W3SVC_WebService"
53
54 def __init__(self, name, init_config, agentConfig, instances):
55 WinWMICheck.__init__(self, name, init_config, agentConfig, instances)
56
57 def check(self, instance):
58 # Connect to the WMI provider
59 host = instance.get('host', "localhost")
60 user = instance.get('username', "")
61 password = instance.get('password', "")
62 instance_tags = instance.get('tags', [])
63 sites = instance.get('sites', ['_Total'])
64 is_2008 = _is_affirmative(instance.get('is_2008', False))
65
66
67 instance_hash = hash_mutable(instance)
68 instance_key = self._get_instance_key(host, self.NAMESPACE, self.CLASS, instance_hash)
69 filters = map(lambda x: {"Name": tuple(('=', x))}, sites)
70
71 metrics_by_property, properties = self._get_wmi_properties(instance_key, self.METRICS, [])
72 if is_2008:
73 for idx, prop in enumerate(properties):
74 if prop == "TotalBytesTransferred".lower():
75 properties[idx] = "TotalBytesTransfered"
76
77 wmi_sampler = self._get_wmi_sampler(
78 instance_key,
79 self.CLASS, properties,
80 filters=filters,
81 host=host, namespace=self.NAMESPACE,
82 username=user, password=password
83 )
84
85 # Sample, extract & submit metrics
86 try:
87 wmi_sampler.sample()
88
89 metrics = self._extract_metrics(wmi_sampler, sites, instance_tags)
90 except TimeoutException:
91 self.log.warning(
92 u"[IIS] WMI query timed out."
93 u" class={wmi_class} - properties={wmi_properties} -"
94 u" filters={filters} - tags={instance_tags}".format(
95 wmi_class=self.CLASS, wmi_properties=properties,
96 filters=filters, instance_tags=instance_tags
97 )
98 )
99 except pythoncom.com_error as e:
100 if '0x80041017' in str(e):
101 self.warning("You may be running IIS6/7 which reports metrics a \
102 little differently. Try enabling the is_2008 flag for this instance.")
103 raise e
104 else:
105 self._submit_events(wmi_sampler, sites)
106 self._submit_metrics(metrics, metrics_by_property)
107
108 def _extract_metrics(self, wmi_sampler, sites, tags):
109 """
110 Extract and tag metrics from the WMISampler.
111
112 Returns: List of WMIMetric
113 ```
114 [
115 WMIMetric("freemegabytes", 19742, ["name:_total"]),
116 WMIMetric("avgdiskbytesperwrite", 1536, ["name:c:"]),
117 ]
118 ```
119 """
120 metrics = []
121
122 for wmi_obj in wmi_sampler:
123 tags = list(tags) if tags else []
124
125 # get site name
126 sitename = wmi_obj['Name']
127
128 # Skip any sites we don't specifically want.
129 if sitename not in sites:
130 continue
131 elif sitename != "_Total":
132 tags.append("site:{0}".format(self.normalize(sitename)))
133
134 # Tag with `tag_queries` parameter
135 for wmi_property, wmi_value in wmi_obj.iteritems():
136 # Tag with `tag_by` parameter
137 try:
138 metrics.append(WMIMetric(wmi_property, float(wmi_value), tags))
139 except ValueError:
140 self.log.warning(u"When extracting metrics with WMI, found a non digit value"
141 " for property '{0}'.".format(wmi_property))
142 continue
143 except TypeError:
144 self.log.warning(u"When extracting metrics with WMI, found a missing property"
145 " '{0}'".format(wmi_property))
146 continue
147 return metrics
148
149 def _submit_events(self, wmi_sampler, sites):
150 expected_sites = set(sites)
151
152 for wmi_obj in wmi_sampler:
153 sitename = wmi_obj['Name']
154 if sitename == "_Total":
155 continue
156
157 uptime = wmi_obj["ServiceUptime"]
158 status = AgentCheck.CRITICAL if uptime == 0 else AgentCheck.OK
159
160 self.service_check(self.SERVICE_CHECK, status, tags=['site:{0}'.format(self.normalize(sitename))])
161 expected_sites.remove(sitename)
162
163 for site in expected_sites:
164 self.service_check(self.SERVICE_CHECK, AgentCheck.CRITICAL,
165 tags=['site:{0}'.format(self.normalize(site))])
166
167
168 def _submit_metrics(self, wmi_metrics, metrics_by_property):
169 for m in wmi_metrics:
170 metric_name = m.name
171 # Windows 2008 sp2 reports it as TotalbytesTransfered
172 # instead of TotalBytesTransferred (single r)
173 if metric_name.lower() == "totalbytestransfered":
174 metric_name = "totalbytestransferred"
175 elif m.name not in metrics_by_property:
176 continue
177
178 metric, mtype = metrics_by_property[metric_name]
179 submittor = getattr(self, mtype)
180 submittor(metric, m.value, m.tags)
181
[end of checks.d/iis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checks.d/iis.py b/checks.d/iis.py
--- a/checks.d/iis.py
+++ b/checks.d/iis.py
@@ -151,9 +151,6 @@
for wmi_obj in wmi_sampler:
sitename = wmi_obj['Name']
- if sitename == "_Total":
- continue
-
uptime = wmi_obj["ServiceUptime"]
status = AgentCheck.CRITICAL if uptime == 0 else AgentCheck.OK
| {"golden_diff": "diff --git a/checks.d/iis.py b/checks.d/iis.py\n--- a/checks.d/iis.py\n+++ b/checks.d/iis.py\n@@ -151,9 +151,6 @@\n \n for wmi_obj in wmi_sampler:\n sitename = wmi_obj['Name']\n- if sitename == \"_Total\":\n- continue\n-\n uptime = wmi_obj[\"ServiceUptime\"]\n status = AgentCheck.CRITICAL if uptime == 0 else AgentCheck.OK\n", "issue": "[iis] Non-digit value in WMI metric name\nGetting this warning in v5.7.1 and `iis.site_up` is permanently triggered.\n\n```\n2016-03-16 14:57:40 GMT Standard Time | WARNING | checks.iis(iis.py:127) | When extracting metrics with WMI, found a non digit value for property 'name'.\n```\n\nThis was working fine in v5.6.1\n\n", "before_files": [{"content": "'''\nCheck the performance counters from IIS\n'''\n# 3p\nimport pythoncom\n\n# project\nfrom checks import AgentCheck\nfrom checks.wmi_check import WinWMICheck, WMIMetric\nfrom config import _is_affirmative\nfrom utils.containers import hash_mutable\nfrom utils.timeout import TimeoutException\n\n\nclass IIS(WinWMICheck):\n METRICS = [\n ('ServiceUptime', 'iis.uptime', 'gauge'),\n\n # Network\n ('TotalBytesSent','iis.net.bytes_sent', 'rate'),\n ('TotalBytesReceived', 'iis.net.bytes_rcvd', 'rate'),\n ('TotalBytesTransferred', 'iis.net.bytes_total', 'rate'),\n ('CurrentConnections', 'iis.net.num_connections', 'gauge'),\n ('TotalFilesSent', 'iis.net.files_sent', 'rate'),\n ('TotalFilesReceived', 'iis.net.files_rcvd', 'rate'),\n ('TotalConnectionAttemptsAllInstances', 'iis.net.connection_attempts', 'rate'),\n\n # HTTP Methods\n ('TotalGetRequests', 'iis.httpd_request_method.get', 'rate'),\n ('TotalPostRequests', 'iis.httpd_request_method.post', 'rate'),\n ('TotalHeadRequests', 'iis.httpd_request_method.head', 'rate'),\n ('TotalPutRequests', 'iis.httpd_request_method.put', 'rate'),\n ('TotalDeleteRequests', 'iis.httpd_request_method.delete', 'rate'),\n ('TotalOptionsRequests', 'iis.httpd_request_method.options', 'rate'),\n ('TotalTraceRequests', 'iis.httpd_request_method.trace', 'rate'),\n\n # Errors\n ('TotalNotFoundErrors', 'iis.errors.not_found', 'rate'),\n ('TotalLockedErrors', 'iis.errors.locked', 'rate'),\n\n # Users\n ('TotalAnonymousUsers', 'iis.users.anon', 'rate'),\n ('TotalNonAnonymousUsers', 'iis.users.nonanon', 'rate'),\n\n # Requests\n ('TotalCGIRequests', 'iis.requests.cgi', 'rate'),\n ('TotalISAPIExtensionRequests', 'iis.requests.isapi', 'rate'),\n ]\n SERVICE_CHECK = \"iis.site_up\"\n\n NAMESPACE = \"root\\\\CIMV2\"\n CLASS = \"Win32_PerfFormattedData_W3SVC_WebService\"\n\n def __init__(self, name, init_config, agentConfig, instances):\n WinWMICheck.__init__(self, name, init_config, agentConfig, instances)\n\n def check(self, instance):\n # Connect to the WMI provider\n host = instance.get('host', \"localhost\")\n user = instance.get('username', \"\")\n password = instance.get('password', \"\")\n instance_tags = instance.get('tags', [])\n sites = instance.get('sites', ['_Total'])\n is_2008 = _is_affirmative(instance.get('is_2008', False))\n\n\n instance_hash = hash_mutable(instance)\n instance_key = self._get_instance_key(host, self.NAMESPACE, self.CLASS, instance_hash)\n filters = map(lambda x: {\"Name\": tuple(('=', x))}, sites)\n\n metrics_by_property, properties = self._get_wmi_properties(instance_key, self.METRICS, [])\n if is_2008:\n for idx, prop in enumerate(properties):\n if prop == \"TotalBytesTransferred\".lower():\n properties[idx] = \"TotalBytesTransfered\"\n\n wmi_sampler = self._get_wmi_sampler(\n instance_key,\n self.CLASS, properties,\n filters=filters,\n 
host=host, namespace=self.NAMESPACE,\n username=user, password=password\n )\n\n # Sample, extract & submit metrics\n try:\n wmi_sampler.sample()\n\n metrics = self._extract_metrics(wmi_sampler, sites, instance_tags)\n except TimeoutException:\n self.log.warning(\n u\"[IIS] WMI query timed out.\"\n u\" class={wmi_class} - properties={wmi_properties} -\"\n u\" filters={filters} - tags={instance_tags}\".format(\n wmi_class=self.CLASS, wmi_properties=properties,\n filters=filters, instance_tags=instance_tags\n )\n )\n except pythoncom.com_error as e:\n if '0x80041017' in str(e):\n self.warning(\"You may be running IIS6/7 which reports metrics a \\\n little differently. Try enabling the is_2008 flag for this instance.\")\n raise e\n else:\n self._submit_events(wmi_sampler, sites)\n self._submit_metrics(metrics, metrics_by_property)\n\n def _extract_metrics(self, wmi_sampler, sites, tags):\n \"\"\"\n Extract and tag metrics from the WMISampler.\n\n Returns: List of WMIMetric\n ```\n [\n WMIMetric(\"freemegabytes\", 19742, [\"name:_total\"]),\n WMIMetric(\"avgdiskbytesperwrite\", 1536, [\"name:c:\"]),\n ]\n ```\n \"\"\"\n metrics = []\n\n for wmi_obj in wmi_sampler:\n tags = list(tags) if tags else []\n\n # get site name\n sitename = wmi_obj['Name']\n\n # Skip any sites we don't specifically want.\n if sitename not in sites:\n continue\n elif sitename != \"_Total\":\n tags.append(\"site:{0}\".format(self.normalize(sitename)))\n\n # Tag with `tag_queries` parameter\n for wmi_property, wmi_value in wmi_obj.iteritems():\n # Tag with `tag_by` parameter\n try:\n metrics.append(WMIMetric(wmi_property, float(wmi_value), tags))\n except ValueError:\n self.log.warning(u\"When extracting metrics with WMI, found a non digit value\"\n \" for property '{0}'.\".format(wmi_property))\n continue\n except TypeError:\n self.log.warning(u\"When extracting metrics with WMI, found a missing property\"\n \" '{0}'\".format(wmi_property))\n continue\n return metrics\n\n def _submit_events(self, wmi_sampler, sites):\n expected_sites = set(sites)\n\n for wmi_obj in wmi_sampler:\n sitename = wmi_obj['Name']\n if sitename == \"_Total\":\n continue\n\n uptime = wmi_obj[\"ServiceUptime\"]\n status = AgentCheck.CRITICAL if uptime == 0 else AgentCheck.OK\n\n self.service_check(self.SERVICE_CHECK, status, tags=['site:{0}'.format(self.normalize(sitename))])\n expected_sites.remove(sitename)\n\n for site in expected_sites:\n self.service_check(self.SERVICE_CHECK, AgentCheck.CRITICAL,\n tags=['site:{0}'.format(self.normalize(site))])\n\n\n def _submit_metrics(self, wmi_metrics, metrics_by_property):\n for m in wmi_metrics:\n metric_name = m.name\n # Windows 2008 sp2 reports it as TotalbytesTransfered\n # instead of TotalBytesTransferred (single r)\n if metric_name.lower() == \"totalbytestransfered\":\n metric_name = \"totalbytestransferred\"\n elif m.name not in metrics_by_property:\n continue\n\n metric, mtype = metrics_by_property[metric_name]\n submittor = getattr(self, mtype)\n submittor(metric, m.value, m.tags)\n", "path": "checks.d/iis.py"}]} | 2,692 | 113 |
gh_patches_debug_30513 | rasdani/github-patches | git_diff | great-expectations__great_expectations-1292 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
QueryBatchKwargsGenerator._get_raw_query produces incorrect filepath
I am using the `QueryBatchKwargsGenerator` to generate `BATCH_KWARGS` using a SQL file I have stored on disk. My `great_expectations.yml` looks like:
```yaml
datasources:
domi:
class_name: SqlAlchemyDatasource
module_name:
data_asset_type:
class_name: SqlAlchemyDataset
credentials:
drivername: postgres
host: ${HOST}
port: ${PORT}
username: ${USER}
password: ${PASSWORD}
database: ${DBNAME}
generators:
apartments-generator:
class_name: QueryBatchKwargsGenerator
name: apartments
```
I am running the following:
```python
>>> import great_expectations as ge
>>> context = ge.data_context.DataContext()
>>> context.get_available_data_asset_names()
{'domi': {'apartments-generator': {'names': [('listings', 'query')]}}}
>>> context.build_batch_kwargs(
datasource='domi', generator='apartments-generator', name='listings'
)
```
Which raises this error:

This happens because `_get_raw_query` is doing `tuple('listings')`, which returns `('l', 'i', 's', 't', 'i', 'n', 'g', 's')`
```python
def _get_raw_query(self, generator_asset):
return self._store_backend.get(tuple(generator_asset))
```
I believe this should just be replaced with:
```python
def _get_raw_query(self, generator_asset):
return self._store_backend.get((generator_asset,))
```
Currently I can get around this by providing a tuple instead of a string, i.e.
```python
context.build_batch_kwargs(
datasource='domi',
generator='apartments-generator',
name=('listings', )
)
```
But that goes against what the docs are saying [here](https://docs.greatexpectations.io/en/latest/module_docs/data_context_module.html#great_expectations.data_context.BaseDataContext.build_batch_kwargs):

</issue>
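The core of the bug is easy to reproduce in isolation:

```python
# The tuple pitfall in isolation: tuple() iterates the string character by
# character, while a one-element tuple literal keeps the asset name intact.
print(tuple("listings"))   # ('l', 'i', 's', 't', 'i', 'n', 'g', 's')
print(("listings",))       # ('listings',)
```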
<code>
[start of great_expectations/datasource/generator/query_generator.py]
1 import os
2 import logging
3
4 from .batch_kwargs_generator import BatchKwargsGenerator
5 from great_expectations.datasource.types import SqlAlchemyDatasourceQueryBatchKwargs
6 from great_expectations.exceptions import (
7 BatchKwargsError,
8 ClassInstantiationError,
9 )
10 from ...data_context.util import instantiate_class_from_config
11
12 logger = logging.getLogger(__name__)
13
14 try:
15 import sqlalchemy
16 from sqlalchemy import create_engine
17 from sqlalchemy.engine import reflection
18 except ImportError:
19 sqlalchemy = None
20 create_engine = None
21 reflection = None
22 logger.debug("Unable to import sqlalchemy.")
23
24
25 class QueryBatchKwargsGenerator(BatchKwargsGenerator):
26 """Produce query-style batch_kwargs from sql files stored on disk
27 """
28 recognized_batch_parameters = {'query_parameters', 'partition_id'}
29
30 def __init__(self, name="default", datasource=None, query_store_backend=None, queries=None):
31 super(QueryBatchKwargsGenerator, self).__init__(name=name, datasource=datasource)
32 root_directory = None
33 if query_store_backend is None:
34 # We will choose a Tuple store if there is a configured DataContext with a root_directory,
35 # and an InMemoryStore otherwise
36 if datasource and datasource.data_context and datasource.data_context.root_directory:
37 query_store_backend = {
38 "class_name": "TupleFilesystemStoreBackend",
39 "base_directory": os.path.join(datasource.data_context.root_directory, "datasources",
40 datasource.name, "generators", name),
41 "filepath_suffix": ".sql"
42 }
43 root_directory = datasource.data_context.root_directory
44 else:
45 query_store_backend = {
46 "class_name": "InMemoryStoreBackend"
47 }
48 module_name = 'great_expectations.data_context.store'
49 self._store_backend = instantiate_class_from_config(
50 config=query_store_backend,
51 runtime_environment={
52 "root_directory": root_directory
53 },
54 config_defaults={
55 "module_name": module_name
56 }
57 )
58 if not self._store_backend:
59 raise ClassInstantiationError(
60 module_name=module_name,
61 package_name=None,
62 class_name=query_store_backend['class_name']
63 )
64 if queries is not None:
65 for query_name, query in queries.items():
66 self.add_query(query_name, query)
67
68 def _get_raw_query(self, generator_asset):
69 return self._store_backend.get(tuple(generator_asset))
70
71 def _get_iterator(self, generator_asset, query_parameters=None):
72 raw_query = self._get_raw_query(generator_asset)
73 if raw_query is None:
74 logger.warning("No query defined for generator asset: %s" % generator_asset)
75 # There is no valid query path or temp query storage defined with the generator_asset
76 return None
77
78 if query_parameters is None:
79 iter_ = iter([
80 SqlAlchemyDatasourceQueryBatchKwargs(
81 query=raw_query
82 )])
83 else:
84 iter_= iter([
85 SqlAlchemyDatasourceQueryBatchKwargs(
86 query=raw_query,
87 query_parameters=query_parameters
88 )])
89
90 return iter_
91
92 def add_query(self, generator_asset, query):
93 # Backends must have a tuple key; we use only a single-element tuple
94 self._store_backend.set(tuple(generator_asset), query)
95
96 def get_available_data_asset_names(self):
97 defined_queries = self._store_backend.list_keys()
98 # Backends must have a tuple key; we use only a single-element tuple
99 return {"names": [(query_key_tuple[0], "query") for query_key_tuple in defined_queries]}
100
101 def _build_batch_kwargs(self, batch_parameters):
102 """Build batch kwargs from a partition id."""
103 generator_asset = batch_parameters.pop("name")
104 raw_query = self._get_raw_query(generator_asset)
105 partition_id = batch_parameters.pop("partition_id", None)
106 batch_kwargs = self._datasource.process_batch_parameters(**batch_parameters)
107 batch_kwargs["query"] = raw_query
108
109 if partition_id:
110 if not batch_kwargs["query_parameters"]:
111 batch_kwargs["query_parameters"] = {}
112 batch_kwargs["query_parameters"]["partition_id"] = partition_id
113
114 return SqlAlchemyDatasourceQueryBatchKwargs(batch_kwargs)
115
116 def get_available_partition_ids(self, generator_asset):
117 raise BatchKwargsError("QueryBatchKwargsGenerator cannot identify partitions.", {})
118
[end of great_expectations/datasource/generator/query_generator.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/great_expectations/datasource/generator/query_generator.py b/great_expectations/datasource/generator/query_generator.py
--- a/great_expectations/datasource/generator/query_generator.py
+++ b/great_expectations/datasource/generator/query_generator.py
@@ -25,7 +25,7 @@
class QueryBatchKwargsGenerator(BatchKwargsGenerator):
"""Produce query-style batch_kwargs from sql files stored on disk
"""
- recognized_batch_parameters = {'query_parameters', 'partition_id'}
+ recognized_batch_parameters = {'query_parameters', 'partition_id', 'name'}
def __init__(self, name="default", datasource=None, query_store_backend=None, queries=None):
super(QueryBatchKwargsGenerator, self).__init__(name=name, datasource=datasource)
@@ -66,7 +66,7 @@
self.add_query(query_name, query)
def _get_raw_query(self, generator_asset):
- return self._store_backend.get(tuple(generator_asset))
+ return self._store_backend.get((generator_asset,))
def _get_iterator(self, generator_asset, query_parameters=None):
raw_query = self._get_raw_query(generator_asset)
@@ -91,7 +91,7 @@
def add_query(self, generator_asset, query):
# Backends must have a tuple key; we use only a single-element tuple
- self._store_backend.set(tuple(generator_asset), query)
+ self._store_backend.set((generator_asset,), query)
def get_available_data_asset_names(self):
defined_queries = self._store_backend.list_keys()
| {"golden_diff": "diff --git a/great_expectations/datasource/generator/query_generator.py b/great_expectations/datasource/generator/query_generator.py\n--- a/great_expectations/datasource/generator/query_generator.py\n+++ b/great_expectations/datasource/generator/query_generator.py\n@@ -25,7 +25,7 @@\n class QueryBatchKwargsGenerator(BatchKwargsGenerator):\n \"\"\"Produce query-style batch_kwargs from sql files stored on disk\n \"\"\"\n- recognized_batch_parameters = {'query_parameters', 'partition_id'}\n+ recognized_batch_parameters = {'query_parameters', 'partition_id', 'name'}\n \n def __init__(self, name=\"default\", datasource=None, query_store_backend=None, queries=None):\n super(QueryBatchKwargsGenerator, self).__init__(name=name, datasource=datasource)\n@@ -66,7 +66,7 @@\n self.add_query(query_name, query)\n \n def _get_raw_query(self, generator_asset):\n- return self._store_backend.get(tuple(generator_asset))\n+ return self._store_backend.get((generator_asset,))\n \n def _get_iterator(self, generator_asset, query_parameters=None):\n raw_query = self._get_raw_query(generator_asset)\n@@ -91,7 +91,7 @@\n \n def add_query(self, generator_asset, query):\n # Backends must have a tuple key; we use only a single-element tuple\n- self._store_backend.set(tuple(generator_asset), query)\n+ self._store_backend.set((generator_asset,), query)\n \n def get_available_data_asset_names(self):\n defined_queries = self._store_backend.list_keys()\n", "issue": "QueryBatchKwargsGenerator._get_raw_query produces incorrect filepath\nI am using the `QueryBatchKwargsGenerator` to generate `BATCH_KWARGS` using a SQL file I have stored on disk. My `great_expectations.yml` looks like:\r\n\r\n```yaml\r\ndatasources:\r\n domi:\r\n class_name: SqlAlchemyDatasource\r\n module_name:\r\n data_asset_type:\r\n class_name: SqlAlchemyDataset\r\n credentials:\r\n drivername: postgres\r\n host: ${HOST}\r\n port: ${PORT}\r\n username: ${USER}\r\n password: ${PASSWORD}\r\n database: ${DBNAME}\r\n generators:\r\n apartments-generator:\r\n class_name: QueryBatchKwargsGenerator\r\n name: apartments\r\n```\r\n\r\nI am running the following:\r\n\r\n```python\r\n>>> import great_expectations as get\r\n\r\n>>> context = ge.data_context.DataContext()\r\n>>> context.get_available_data_asset_names()\r\n{'domi': {'apartments-generator': {'names': [('listings', 'query')]}}}\r\n\r\n>>> context.build_batch_kwargs(\r\n datasource='domi', generator='apartments-generator', name='listings'\r\n)\r\n```\r\n\r\nWhich raises this error:\r\n\r\n\r\n\r\nThis happens because `_get_raw_query` is doing `tuple('listings')` which returns: `('l', 'i', 's', 't', 'i', 'n', 'g', 's')`)\r\n\r\n```python\r\n def _get_raw_query(self, generator_asset):\r\n return self._store_backend.get(tuple(generator_asset))\r\n```\r\n\r\nI believe this should just be replaced with:\r\n\r\n```python\r\n def _get_raw_query(self, generator_asset):\r\n return self._store_backend.get((generator_asset,))\r\n```\r\n\r\nCurrently I can get around this by providing a tuple instead of a string, i.e.\r\n\r\n```python\r\ncontext.build_batch_kwargs(\r\n datasource='domi',\r\n generator='apartments-generator',\r\n name=('listings', )\r\n)\r\n```\r\n\r\nBut that goes against what the docs are saying [here](https://docs.greatexpectations.io/en/latest/module_docs/data_context_module.html#great_expectations.data_context.BaseDataContext.build_batch_kwargs):\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nimport logging\n\nfrom .batch_kwargs_generator import BatchKwargsGenerator\nfrom 
great_expectations.datasource.types import SqlAlchemyDatasourceQueryBatchKwargs\nfrom great_expectations.exceptions import (\n BatchKwargsError,\n ClassInstantiationError,\n)\nfrom ...data_context.util import instantiate_class_from_config\n\nlogger = logging.getLogger(__name__)\n\ntry:\n import sqlalchemy\n from sqlalchemy import create_engine\n from sqlalchemy.engine import reflection\nexcept ImportError:\n sqlalchemy = None\n create_engine = None\n reflection = None\n logger.debug(\"Unable to import sqlalchemy.\")\n\n\nclass QueryBatchKwargsGenerator(BatchKwargsGenerator):\n \"\"\"Produce query-style batch_kwargs from sql files stored on disk\n \"\"\"\n recognized_batch_parameters = {'query_parameters', 'partition_id'}\n\n def __init__(self, name=\"default\", datasource=None, query_store_backend=None, queries=None):\n super(QueryBatchKwargsGenerator, self).__init__(name=name, datasource=datasource)\n root_directory = None\n if query_store_backend is None:\n # We will choose a Tuple store if there is a configured DataContext with a root_directory,\n # and an InMemoryStore otherwise\n if datasource and datasource.data_context and datasource.data_context.root_directory:\n query_store_backend = {\n \"class_name\": \"TupleFilesystemStoreBackend\",\n \"base_directory\": os.path.join(datasource.data_context.root_directory, \"datasources\",\n datasource.name, \"generators\", name),\n \"filepath_suffix\": \".sql\"\n }\n root_directory = datasource.data_context.root_directory\n else:\n query_store_backend = {\n \"class_name\": \"InMemoryStoreBackend\"\n }\n module_name = 'great_expectations.data_context.store'\n self._store_backend = instantiate_class_from_config(\n config=query_store_backend,\n runtime_environment={\n \"root_directory\": root_directory\n },\n config_defaults={\n \"module_name\": module_name\n }\n )\n if not self._store_backend:\n raise ClassInstantiationError(\n module_name=module_name,\n package_name=None,\n class_name=query_store_backend['class_name']\n )\n if queries is not None:\n for query_name, query in queries.items():\n self.add_query(query_name, query)\n\n def _get_raw_query(self, generator_asset):\n return self._store_backend.get(tuple(generator_asset))\n\n def _get_iterator(self, generator_asset, query_parameters=None):\n raw_query = self._get_raw_query(generator_asset)\n if raw_query is None:\n logger.warning(\"No query defined for generator asset: %s\" % generator_asset)\n # There is no valid query path or temp query storage defined with the generator_asset\n return None\n\n if query_parameters is None:\n iter_ = iter([\n SqlAlchemyDatasourceQueryBatchKwargs(\n query=raw_query\n )])\n else:\n iter_= iter([\n SqlAlchemyDatasourceQueryBatchKwargs(\n query=raw_query,\n query_parameters=query_parameters\n )])\n\n return iter_\n\n def add_query(self, generator_asset, query):\n # Backends must have a tuple key; we use only a single-element tuple\n self._store_backend.set(tuple(generator_asset), query)\n\n def get_available_data_asset_names(self):\n defined_queries = self._store_backend.list_keys()\n # Backends must have a tuple key; we use only a single-element tuple\n return {\"names\": [(query_key_tuple[0], \"query\") for query_key_tuple in defined_queries]}\n\n def _build_batch_kwargs(self, batch_parameters):\n \"\"\"Build batch kwargs from a partition id.\"\"\"\n generator_asset = batch_parameters.pop(\"name\")\n raw_query = self._get_raw_query(generator_asset)\n partition_id = batch_parameters.pop(\"partition_id\", None)\n batch_kwargs = 
self._datasource.process_batch_parameters(**batch_parameters)\n batch_kwargs[\"query\"] = raw_query\n\n if partition_id:\n if not batch_kwargs[\"query_parameters\"]:\n batch_kwargs[\"query_parameters\"] = {}\n batch_kwargs[\"query_parameters\"][\"partition_id\"] = partition_id\n\n return SqlAlchemyDatasourceQueryBatchKwargs(batch_kwargs)\n\n def get_available_partition_ids(self, generator_asset):\n raise BatchKwargsError(\"QueryBatchKwargsGenerator cannot identify partitions.\", {})\n", "path": "great_expectations/datasource/generator/query_generator.py"}]} | 2,274 | 348 |
gh_patches_debug_33373 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-378 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix session injection message from msrest
Support session injection in MicrosoftAppCredentials, fixing the msrest warning: "Your credentials class does not support session injection. Performance will not be at the maximum."
</issue>
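For context, this warning is logged by msrest when a credentials class' `signed_session` cannot accept an externally created session. A minimal sketch of the shape msrest looks for is below; the class name is illustrative and the token handling is elided:

```python
import requests
from msrest.authentication import Authentication


class InjectableCredentials(Authentication):
    """Sketch only: accept an optional session instead of always creating one."""

    def signed_session(self, session: requests.Session = None) -> requests.Session:
        # Reuse the injected session when one is provided; otherwise build our own.
        session = session or requests.Session()
        session.headers["Authorization"] = "Bearer <access-token>"  # placeholder token
        return session
```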
<code>
[start of libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py]
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from datetime import datetime, timedelta
5 from urllib.parse import urlparse
6 from msrest.authentication import BasicTokenAuthentication, Authentication
7 import requests
8 from .constants import Constants
9
10 # TODO: Decide to move this to Constants or viceversa (when porting OAuth)
11 AUTH_SETTINGS = {
12 "refreshEndpoint": "https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token",
13 "refreshScope": "https://api.botframework.com/.default",
14 "botConnectorOpenIdMetadata": "https://login.botframework.com/v1/.well-known/openidconfiguration",
15 "botConnectorIssuer": "https://api.botframework.com",
16 "emulatorOpenIdMetadata": "https://login.microsoftonline.com/botframework.com/v2.0/"
17 ".well-known/openid-configuration",
18 "emulatorAuthV31IssuerV1": "https://sts.windows.net/d6d49420-f39b-4df7-a1dc-d59a935871db/",
19 "emulatorAuthV31IssuerV2": "https://login.microsoftonline.com/d6d49420-f39b-4df7-a1dc-d59a935871db/v2.0",
20 "emulatorAuthV32IssuerV1": "https://sts.windows.net/f8cdef31-a31e-4b4a-93e4-5f571e91255a/",
21 "emulatorAuthV32IssuerV2": "https://login.microsoftonline.com/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0",
22 }
23
24
25 class _OAuthResponse:
26 def __init__(self):
27 self.token_type = None
28 self.expires_in = None
29 self.access_token = None
30 self.expiration_time = None
31
32 @staticmethod
33 def from_json(json_values):
34 result = _OAuthResponse()
35 try:
36 result.token_type = json_values["token_type"]
37 result.access_token = json_values["access_token"]
38 result.expires_in = json_values["expires_in"]
39 except KeyError:
40 pass
41 return result
42
43
44 class MicrosoftAppCredentials(Authentication):
45 """
46 MicrosoftAppCredentials auth implementation and cache.
47 """
48
49 schema = "Bearer"
50
51 trustedHostNames = {
52 "state.botframework.com": datetime.max,
53 "api.botframework.com": datetime.max,
54 "token.botframework.com": datetime.max,
55 "state.botframework.azure.us": datetime.max,
56 "api.botframework.azure.us": datetime.max,
57 "token.botframework.azure.us": datetime.max,
58 }
59 cache = {}
60
61 def __init__(self, app_id: str, password: str, channel_auth_tenant: str = None):
62 """
63 Initializes a new instance of MicrosoftAppCredentials class
64 :param app_id: The Microsoft app ID.
65 :param app_password: The Microsoft app password.
66 :param channel_auth_tenant: Optional. The oauth token tenant.
67 """
68 # The configuration property for the Microsoft app ID.
69 self.microsoft_app_id = app_id
70 # The configuration property for the Microsoft app Password.
71 self.microsoft_app_password = password
72 tenant = (
73 channel_auth_tenant
74 if channel_auth_tenant
75 else Constants.DEFAULT_CHANNEL_AUTH_TENANT
76 )
77 self.oauth_endpoint = (
78 Constants.TO_CHANNEL_FROM_BOT_LOGIN_URL_PREFIX
79 + tenant
80 + Constants.TO_CHANNEL_FROM_BOT_TOKEN_ENDPOINT_PATH
81 )
82 self.oauth_scope = AUTH_SETTINGS["refreshScope"]
83 self.token_cache_key = app_id + "-cache"
84
85 def signed_session(self) -> requests.Session: # pylint: disable=arguments-differ
86 """
87 Gets the signed session.
88 :returns: Signed requests.Session object
89 """
90 auth_token = self.get_access_token()
91
92 basic_authentication = BasicTokenAuthentication({"access_token": auth_token})
93 session = basic_authentication.signed_session()
94
95 # If there is no microsoft_app_id and no self.microsoft_app_password, then there shouldn't
96 # be an "Authorization" header on the outgoing activity.
97 if not self.microsoft_app_id and not self.microsoft_app_password:
98 del session.headers["Authorization"]
99 return session
100
101 def get_access_token(self, force_refresh: bool = False) -> str:
102 """
103 Gets an OAuth access token.
104 :param force_refresh: True to force a refresh of the token; or false to get
105 a cached token if it exists.
106 :returns: Access token string
107 """
108 if self.microsoft_app_id and self.microsoft_app_password:
109 if not force_refresh:
110 # check the global cache for the token. If we have it, and it's valid, we're done.
111 oauth_token = MicrosoftAppCredentials.cache.get(
112 self.token_cache_key, None
113 )
114 if oauth_token is not None:
115 # we have the token. Is it valid?
116 if oauth_token.expiration_time > datetime.now():
117 return oauth_token.access_token
118 # We need to refresh the token, because:
119 # 1. The user requested it via the force_refresh parameter
120 # 2. We have it, but it's expired
121 # 3. We don't have it in the cache.
122 oauth_token = self.refresh_token()
123 MicrosoftAppCredentials.cache.setdefault(self.token_cache_key, oauth_token)
124 return oauth_token.access_token
125 return ""
126
127 def refresh_token(self) -> _OAuthResponse:
128 """
129 returns: _OAuthResponse
130 """
131 options = {
132 "grant_type": "client_credentials",
133 "client_id": self.microsoft_app_id,
134 "client_secret": self.microsoft_app_password,
135 "scope": self.oauth_scope,
136 }
137
138 response = requests.post(self.oauth_endpoint, data=options)
139 response.raise_for_status()
140
141 oauth_response = _OAuthResponse.from_json(response.json())
142 oauth_response.expiration_time = datetime.now() + timedelta(
143 seconds=(oauth_response.expires_in - 300)
144 )
145
146 return oauth_response
147
148 @staticmethod
149 def trust_service_url(service_url: str, expiration=None):
150 """
151 Checks if the service url is for a trusted host or not.
152 :param service_url: The service url.
153 :param expiration: The expiration time after which this service url is not trusted anymore.
154 :returns: True if the host of the service url is trusted; False otherwise.
155 """
156 if expiration is None:
157 expiration = datetime.now() + timedelta(days=1)
158 host = urlparse(service_url).hostname
159 if host is not None:
160 MicrosoftAppCredentials.trustedHostNames[host] = expiration
161
162 @staticmethod
163 def is_trusted_service(service_url: str) -> bool:
164 """
165 Checks if the service url is for a trusted host or not.
166 :param service_url: The service url.
167 :returns: True if the host of the service url is trusted; False otherwise.
168 """
169 host = urlparse(service_url).hostname
170 if host is not None:
171 return MicrosoftAppCredentials._is_trusted_url(host)
172 return False
173
174 @staticmethod
175 def _is_trusted_url(host: str) -> bool:
176 expiration = MicrosoftAppCredentials.trustedHostNames.get(host, datetime.min)
177 return expiration > (datetime.now() - timedelta(minutes=5))
178
[end of libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py b/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py
--- a/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py
+++ b/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py
@@ -3,8 +3,9 @@
from datetime import datetime, timedelta
from urllib.parse import urlparse
-from msrest.authentication import BasicTokenAuthentication, Authentication
import requests
+
+from msrest.authentication import Authentication
from .constants import Constants
# TODO: Decide to move this to Constants or viceversa (when porting OAuth)
@@ -82,20 +83,25 @@
self.oauth_scope = AUTH_SETTINGS["refreshScope"]
self.token_cache_key = app_id + "-cache"
- def signed_session(self) -> requests.Session: # pylint: disable=arguments-differ
+ # pylint: disable=arguments-differ
+ def signed_session(self, session: requests.Session = None) -> requests.Session:
"""
Gets the signed session.
:returns: Signed requests.Session object
"""
- auth_token = self.get_access_token()
-
- basic_authentication = BasicTokenAuthentication({"access_token": auth_token})
- session = basic_authentication.signed_session()
+ if not session:
+ session = requests.Session()
# If there is no microsoft_app_id and no self.microsoft_app_password, then there shouldn't
# be an "Authorization" header on the outgoing activity.
if not self.microsoft_app_id and not self.microsoft_app_password:
- del session.headers["Authorization"]
+ session.headers.pop("Authorization", None)
+
+ elif not session.headers.get("Authorization"):
+ auth_token = self.get_access_token()
+ header = "{} {}".format("Bearer", auth_token)
+ session.headers["Authorization"] = header
+
return session
def get_access_token(self, force_refresh: bool = False) -> str:
| {"golden_diff": "diff --git a/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py b/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py\n--- a/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py\n+++ b/libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py\n@@ -3,8 +3,9 @@\n \n from datetime import datetime, timedelta\n from urllib.parse import urlparse\n-from msrest.authentication import BasicTokenAuthentication, Authentication\n import requests\n+\n+from msrest.authentication import Authentication\n from .constants import Constants\n \n # TODO: Decide to move this to Constants or viceversa (when porting OAuth)\n@@ -82,20 +83,25 @@\n self.oauth_scope = AUTH_SETTINGS[\"refreshScope\"]\n self.token_cache_key = app_id + \"-cache\"\n \n- def signed_session(self) -> requests.Session: # pylint: disable=arguments-differ\n+ # pylint: disable=arguments-differ\n+ def signed_session(self, session: requests.Session = None) -> requests.Session:\n \"\"\"\n Gets the signed session.\n :returns: Signed requests.Session object\n \"\"\"\n- auth_token = self.get_access_token()\n-\n- basic_authentication = BasicTokenAuthentication({\"access_token\": auth_token})\n- session = basic_authentication.signed_session()\n+ if not session:\n+ session = requests.Session()\n \n # If there is no microsoft_app_id and no self.microsoft_app_password, then there shouldn't\n # be an \"Authorization\" header on the outgoing activity.\n if not self.microsoft_app_id and not self.microsoft_app_password:\n- del session.headers[\"Authorization\"]\n+ session.headers.pop(\"Authorization\", None)\n+\n+ elif not session.headers.get(\"Authorization\"):\n+ auth_token = self.get_access_token()\n+ header = \"{} {}\".format(\"Bearer\", auth_token)\n+ session.headers[\"Authorization\"] = header\n+\n return session\n \n def get_access_token(self, force_refresh: bool = False) -> str:\n", "issue": "Fix session injection message from msrest\nSupport session injection in MicrosoftAppCredentials fixing the warning of: Your credentials class does not support session injection. Performance will not be at the maximum.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom datetime import datetime, timedelta\nfrom urllib.parse import urlparse\nfrom msrest.authentication import BasicTokenAuthentication, Authentication\nimport requests\nfrom .constants import Constants\n\n# TODO: Decide to move this to Constants or viceversa (when porting OAuth)\nAUTH_SETTINGS = {\n \"refreshEndpoint\": \"https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token\",\n \"refreshScope\": \"https://api.botframework.com/.default\",\n \"botConnectorOpenIdMetadata\": \"https://login.botframework.com/v1/.well-known/openidconfiguration\",\n \"botConnectorIssuer\": \"https://api.botframework.com\",\n \"emulatorOpenIdMetadata\": \"https://login.microsoftonline.com/botframework.com/v2.0/\"\n \".well-known/openid-configuration\",\n \"emulatorAuthV31IssuerV1\": \"https://sts.windows.net/d6d49420-f39b-4df7-a1dc-d59a935871db/\",\n \"emulatorAuthV31IssuerV2\": \"https://login.microsoftonline.com/d6d49420-f39b-4df7-a1dc-d59a935871db/v2.0\",\n \"emulatorAuthV32IssuerV1\": \"https://sts.windows.net/f8cdef31-a31e-4b4a-93e4-5f571e91255a/\",\n \"emulatorAuthV32IssuerV2\": \"https://login.microsoftonline.com/f8cdef31-a31e-4b4a-93e4-5f571e91255a/v2.0\",\n}\n\n\nclass _OAuthResponse:\n def __init__(self):\n self.token_type = None\n self.expires_in = None\n self.access_token = None\n self.expiration_time = None\n\n @staticmethod\n def from_json(json_values):\n result = _OAuthResponse()\n try:\n result.token_type = json_values[\"token_type\"]\n result.access_token = json_values[\"access_token\"]\n result.expires_in = json_values[\"expires_in\"]\n except KeyError:\n pass\n return result\n\n\nclass MicrosoftAppCredentials(Authentication):\n \"\"\"\n MicrosoftAppCredentials auth implementation and cache.\n \"\"\"\n\n schema = \"Bearer\"\n\n trustedHostNames = {\n \"state.botframework.com\": datetime.max,\n \"api.botframework.com\": datetime.max,\n \"token.botframework.com\": datetime.max,\n \"state.botframework.azure.us\": datetime.max,\n \"api.botframework.azure.us\": datetime.max,\n \"token.botframework.azure.us\": datetime.max,\n }\n cache = {}\n\n def __init__(self, app_id: str, password: str, channel_auth_tenant: str = None):\n \"\"\"\n Initializes a new instance of MicrosoftAppCredentials class\n :param app_id: The Microsoft app ID.\n :param app_password: The Microsoft app password.\n :param channel_auth_tenant: Optional. 
The oauth token tenant.\n \"\"\"\n # The configuration property for the Microsoft app ID.\n self.microsoft_app_id = app_id\n # The configuration property for the Microsoft app Password.\n self.microsoft_app_password = password\n tenant = (\n channel_auth_tenant\n if channel_auth_tenant\n else Constants.DEFAULT_CHANNEL_AUTH_TENANT\n )\n self.oauth_endpoint = (\n Constants.TO_CHANNEL_FROM_BOT_LOGIN_URL_PREFIX\n + tenant\n + Constants.TO_CHANNEL_FROM_BOT_TOKEN_ENDPOINT_PATH\n )\n self.oauth_scope = AUTH_SETTINGS[\"refreshScope\"]\n self.token_cache_key = app_id + \"-cache\"\n\n def signed_session(self) -> requests.Session: # pylint: disable=arguments-differ\n \"\"\"\n Gets the signed session.\n :returns: Signed requests.Session object\n \"\"\"\n auth_token = self.get_access_token()\n\n basic_authentication = BasicTokenAuthentication({\"access_token\": auth_token})\n session = basic_authentication.signed_session()\n\n # If there is no microsoft_app_id and no self.microsoft_app_password, then there shouldn't\n # be an \"Authorization\" header on the outgoing activity.\n if not self.microsoft_app_id and not self.microsoft_app_password:\n del session.headers[\"Authorization\"]\n return session\n\n def get_access_token(self, force_refresh: bool = False) -> str:\n \"\"\"\n Gets an OAuth access token.\n :param force_refresh: True to force a refresh of the token; or false to get\n a cached token if it exists.\n :returns: Access token string\n \"\"\"\n if self.microsoft_app_id and self.microsoft_app_password:\n if not force_refresh:\n # check the global cache for the token. If we have it, and it's valid, we're done.\n oauth_token = MicrosoftAppCredentials.cache.get(\n self.token_cache_key, None\n )\n if oauth_token is not None:\n # we have the token. Is it valid?\n if oauth_token.expiration_time > datetime.now():\n return oauth_token.access_token\n # We need to refresh the token, because:\n # 1. The user requested it via the force_refresh parameter\n # 2. We have it, but it's expired\n # 3. 
We don't have it in the cache.\n oauth_token = self.refresh_token()\n MicrosoftAppCredentials.cache.setdefault(self.token_cache_key, oauth_token)\n return oauth_token.access_token\n return \"\"\n\n def refresh_token(self) -> _OAuthResponse:\n \"\"\"\n returns: _OAuthResponse\n \"\"\"\n options = {\n \"grant_type\": \"client_credentials\",\n \"client_id\": self.microsoft_app_id,\n \"client_secret\": self.microsoft_app_password,\n \"scope\": self.oauth_scope,\n }\n\n response = requests.post(self.oauth_endpoint, data=options)\n response.raise_for_status()\n\n oauth_response = _OAuthResponse.from_json(response.json())\n oauth_response.expiration_time = datetime.now() + timedelta(\n seconds=(oauth_response.expires_in - 300)\n )\n\n return oauth_response\n\n @staticmethod\n def trust_service_url(service_url: str, expiration=None):\n \"\"\"\n Checks if the service url is for a trusted host or not.\n :param service_url: The service url.\n :param expiration: The expiration time after which this service url is not trusted anymore.\n :returns: True if the host of the service url is trusted; False otherwise.\n \"\"\"\n if expiration is None:\n expiration = datetime.now() + timedelta(days=1)\n host = urlparse(service_url).hostname\n if host is not None:\n MicrosoftAppCredentials.trustedHostNames[host] = expiration\n\n @staticmethod\n def is_trusted_service(service_url: str) -> bool:\n \"\"\"\n Checks if the service url is for a trusted host or not.\n :param service_url: The service url.\n :returns: True if the host of the service url is trusted; False otherwise.\n \"\"\"\n host = urlparse(service_url).hostname\n if host is not None:\n return MicrosoftAppCredentials._is_trusted_url(host)\n return False\n\n @staticmethod\n def _is_trusted_url(host: str) -> bool:\n expiration = MicrosoftAppCredentials.trustedHostNames.get(host, datetime.min)\n return expiration > (datetime.now() - timedelta(minutes=5))\n", "path": "libraries/botframework-connector/botframework/connector/auth/microsoft_app_credentials.py"}]} | 2,634 | 445 |
gh_patches_debug_10612 | rasdani/github-patches | git_diff | fedora-infra__bodhi-1450 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
There are multiple alembic heads on the develop branch
The migrations can't be applied on the develop branch because there are multiple alembic heads:
```
[vagrant@bodhi-dev bodhi]$ alembic upgrade head
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib64/python2.7/site-packages/zope': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/zope': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/paste': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/moksha': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/repoze': missing __init__.py
file, filename, etc = imp.find_module(subname, path)
/home/vagrant/bodhi/bodhi/server/__init__.py:26: DeprecationWarning: unauthenticated_userid: As of Pyramid 1.5 the "pyramid.security.unauthenticated_userid" API is now deprecated. It will be removed in Pyramd 1.8. Use the "unauthenticated_userid" attribute of the Pyramid request instead.
from pyramid.security import unauthenticated_userid
INFO [alembic.runtime.migration] Context impl PostgresqlImpl.
INFO [alembic.runtime.migration] Will assume transactional DDL.
ERROR [alembic.util.messaging] Multiple head revisions are present for given argument 'head'; please specify a specific target revision, '<branchname>@head' to narrow to a specific head, or 'heads' for all heads
FAILED: Multiple head revisions are present for given argument 'head'; please specify a specific target revision,
'<branchname>@head' to narrow to a specific head, or 'heads' for all heads
```
</issue>
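For context, the diverging heads can be listed (and, if needed, merged) through Alembic's command API; a minimal sketch follows, with the config path being an assumption about the project layout:

```python
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # location of the Alembic config is an assumption

# Print every head revision so the two diverging migrations can be identified.
command.heads(cfg, verbose=True)

# Resolution is either re-pointing one revision's `down_revision` at the other
# head, or creating an explicit merge revision, e.g.:
# command.merge(cfg, revisions="heads", message="merge divergent heads")
```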
<code>
[start of alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py]
1 """Convert the builds table to be polymorphic.
2
3 Revision ID: 9241378c92ab
4 Revises: fc6b0169c596
5 Create Date: 2017-04-06 20:37:24.766366
6 """
7 from alembic import op
8 import sqlalchemy as sa
9
10
11 # revision identifiers, used by Alembic.
12 revision = '9241378c92ab'
13 down_revision = 'fc6b0169c596'
14
15
16 def upgrade():
17 """Add the type column to the builds table."""
18 # The default of ``1`` is the RPM Build type.
19 op.add_column('builds', sa.Column('type', sa.Integer(), nullable=False, server_default=u'1'))
20 op.alter_column('builds', 'type', server_default=None)
21
22
23 def downgrade():
24 """Remove the type column from the builds table."""
25 op.drop_column('builds', 'type')
26
[end of alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py
--- a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py
+++ b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py
@@ -1,7 +1,7 @@
"""Convert the builds table to be polymorphic.
Revision ID: 9241378c92ab
-Revises: fc6b0169c596
+Revises: 12d3e8695f90
Create Date: 2017-04-06 20:37:24.766366
"""
from alembic import op
@@ -10,7 +10,7 @@
# revision identifiers, used by Alembic.
revision = '9241378c92ab'
-down_revision = 'fc6b0169c596'
+down_revision = '12d3e8695f90'
def upgrade():
| {"golden_diff": "diff --git a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py\n--- a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py\n+++ b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py\n@@ -1,7 +1,7 @@\n \"\"\"Convert the builds table to be polymorphic.\n \n Revision ID: 9241378c92ab\n-Revises: fc6b0169c596\n+Revises: 12d3e8695f90\n Create Date: 2017-04-06 20:37:24.766366\n \"\"\"\n from alembic import op\n@@ -10,7 +10,7 @@\n \n # revision identifiers, used by Alembic.\n revision = '9241378c92ab'\n-down_revision = 'fc6b0169c596'\n+down_revision = '12d3e8695f90'\n \n \n def upgrade():\n", "issue": "There are multiple alembic heads on the develop branch\nThe migrations can't be applied on the develop branch because there are multiple alembic heads:\r\n\r\n```\r\n[vagrant@bodhi-dev bodhi]$ alembic upgrade head\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib64/python2.7/site-packages/zope': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/zope': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/paste': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/moksha': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/usr/lib64/python2.7/pkgutil.py:186: ImportWarning: Not importing directory '/usr/lib/python2.7/site-packages/repoze': missing __init__.py\r\n file, filename, etc = imp.find_module(subname, path)\r\n/home/vagrant/bodhi/bodhi/server/__init__.py:26: DeprecationWarning: unauthenticated_userid: As of Pyramid 1.5 the \"pyramid.security.unauthenticated_userid\" API is now deprecated. It will be removed in Pyramd 1.8. 
Use the \"unauthenticated_userid\" attribute of the Pyramid request instead.\r\n from pyramid.security import unauthenticated_userid\r\nINFO [alembic.runtime.migration] Context impl PostgresqlImpl.\r\nINFO [alembic.runtime.migration] Will assume transactional DDL.\r\nERROR [alembic.util.messaging] Multiple head revisions are present for given argument 'head'; please specify a specific target revision, '<branchname>@head' to narrow to a specific head, or 'heads' for all heads\r\n FAILED: Multiple head revisions are present for given argument 'head'; please specify a specific target revision,\r\n '<branchname>@head' to narrow to a specific head, or 'heads' for all heads\r\n```\n", "before_files": [{"content": "\"\"\"Convert the builds table to be polymorphic.\n\nRevision ID: 9241378c92ab\nRevises: fc6b0169c596\nCreate Date: 2017-04-06 20:37:24.766366\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9241378c92ab'\ndown_revision = 'fc6b0169c596'\n\n\ndef upgrade():\n \"\"\"Add the type column to the builds table.\"\"\"\n # The default of ``1`` is the RPM Build type.\n op.add_column('builds', sa.Column('type', sa.Integer(), nullable=False, server_default=u'1'))\n op.alter_column('builds', 'type', server_default=None)\n\n\ndef downgrade():\n \"\"\"Remove the type column from the builds table.\"\"\"\n op.drop_column('builds', 'type')\n", "path": "alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py"}]} | 1,387 | 307 |
gh_patches_debug_3603 | rasdani/github-patches | git_diff | privacyidea__privacyidea-1570 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Realm-Select box with broken "placeholder"
In the login screen there is a realm select box.
The placeholder for the select box does not work:
https://github.com/privacyidea/privacyidea/blob/master/privacyidea/static/components/login/views/login.html#L63
We could either fix the placeholder or preselect the default realm.
</issue>
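For context, one plausible cause is the comma-separated `realms` string handed to the template: prepending a comma produces an empty first `<option>`, which displaces the placeholder. A minimal sketch of building the string without that empty entry (realm names are placeholders):

```python
realm_names = ["defrealm", "adrealm"]  # stand-in for the realms allowed by policy

# Join without a leading comma; `"," + realms` would add an empty first option
# to the dropdown and break the placeholder entry.
realms = ",".join(realm_names)
```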
<code>
[start of privacyidea/webui/login.py]
1 # -*- coding: utf-8 -*-
2 #
3 # http://www.privacyidea.org
4 # (c) cornelius kölbel, privacyidea.org
5 #
6 # 2017-11-14 Cornelius Kölbel <[email protected]>
7 # Add custom baseline and menu
8 # 2016-01-07 Cornelius Kölbel <[email protected]>
9 # Add password reset
10 # 2015-11-04 Cornelius Kölbel <[email protected]>
11 # Add REMOTE_USER check
12 # 2014-12-22 Cornelius Kölbel, <[email protected]>
13 #
14 # This code is free software; you can redistribute it and/or
15 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
16 # License as published by the Free Software Foundation; either
17 # version 3 of the License, or any later version.
18 #
19 # This code is distributed in the hope that it will be useful,
20 # but WITHOUT ANY WARRANTY; without even the implied warranty of
21 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
23 #
24 # You should have received a copy of the GNU Affero General Public
25 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
26 #
27 from privacyidea.lib.queue import has_job_queue
28
29 __doc__ = """This is the starting point for the single web application.
30 Other html code is dynamically loaded via angularJS and located in
31 /static/views/...
32 """
33 __author__ = "Cornelius Kölbel <[email protected]>"
34
35 from flask import (Blueprint, render_template, request,
36 current_app)
37 from privacyidea.api.lib.prepolicy import is_remote_user_allowed
38 from privacyidea.lib.passwordreset import is_password_reset
39 from privacyidea.lib.error import HSMException
40 from privacyidea.lib.realm import get_realms
41 from privacyidea.lib.policy import PolicyClass, ACTION, SCOPE
42 from privacyidea.lib.subscriptions import subscription_status
43 from privacyidea.lib.utils import get_client_ip
44 from privacyidea.lib.config import get_from_config, SYSCONF
45
46 DEFAULT_THEME = "/static/contrib/css/bootstrap-theme.css"
47
48 login_blueprint = Blueprint('login_blueprint', __name__)
49
50
51 @login_blueprint.route('/', methods=['GET'])
52 def single_page_application():
53 instance = request.script_root
54 if instance == "/":
55 instance = ""
56 # The backend URL should come from the configuration of the system.
57 backend_url = ""
58
59 if current_app.config.get("PI_UI_DEACTIVATED"):
60 # Do not provide the UI
61 return render_template("deactivated.html")
62
63 # The default theme. We can change this later
64 theme = current_app.config.get("PI_CSS", DEFAULT_THEME)
65 # Get further customizations
66 customization = current_app.config.get("PI_CUSTOMIZATION",
67 "/static/customize/")
68 customization = customization.strip('/')
69 # TODO: we should add the CSS into PI_CUSTOMZATION/css
70 # Enrollment-Wizard:
71 # PI_CUSTOMIZATION/views/includes/token.enroll.pre.top.html
72 # PI_CUSTOMIZATION/views/includes/token.enroll.pre.bottom.html
73 # PI_CUSTOMIZATION/views/includes/token.enroll.post.top.html
74 # PI_CUSTOMIZATION/views/includes/token.enroll.post.bottom.html
75 # Get the hidden external links
76 external_links = current_app.config.get("PI_EXTERNAL_LINKS", True)
77 # Get the logo file
78 logo = current_app.config.get("PI_LOGO", "privacyIDEA1.png")
79 browser_lang = request.accept_languages.best_match(["en", "de", "de-DE"], default="en").split("-")[0]
80 # check if login with REMOTE_USER is allowed.
81 remote_user = ""
82 password_reset = False
83 if not hasattr(request, "all_data"):
84 request.all_data = {}
85 # Depending on displaying the realm dropdown, we fill realms or not.
86 policy_object = PolicyClass()
87 realms = ""
88 client_ip = get_client_ip(request,
89 get_from_config(SYSCONF.OVERRIDECLIENT))
90 realm_dropdown = policy_object.get_policies(action=ACTION.REALMDROPDOWN,
91 scope=SCOPE.WEBUI,
92 client=client_ip,
93 active=True)
94 if realm_dropdown:
95 try:
96 realm_dropdown_values = policy_object.get_action_values(
97 action=ACTION.REALMDROPDOWN,
98 scope=SCOPE.WEBUI,
99 client=client_ip)
100 # Use the realms from the policy.
101 realms = ",".join(realm_dropdown_values)
102 except AttributeError as ex:
103 # The policy is still a boolean realm_dropdown action
104 # Thus we display ALL realms
105 realms = ",".join(get_realms())
106 if realms:
107 realms = "," + realms
108
109 try:
110 if is_remote_user_allowed(request):
111 remote_user = request.remote_user
112 password_reset = is_password_reset()
113 hsm_ready = True
114 except HSMException:
115 hsm_ready = False
116
117 # Use policies to determine the customization of menu
118 # and baseline. get_action_values returns an array!
119 sub_state = subscription_status()
120 customization_menu_file = policy_object.get_action_values(
121 allow_white_space_in_action=True,
122 action=ACTION.CUSTOM_MENU,
123 scope=SCOPE.WEBUI,
124 client=client_ip, unique=True)
125 if len(customization_menu_file) and list(customization_menu_file)[0] \
126 and sub_state not in [1, 2]:
127 customization_menu_file = list(customization_menu_file)[0]
128 else:
129 customization_menu_file = "templates/menu.html"
130 customization_baseline_file = policy_object.get_action_values(
131 allow_white_space_in_action=True,
132 action=ACTION.CUSTOM_BASELINE,
133 scope=SCOPE.WEBUI,
134 client=client_ip, unique=True)
135 if len(customization_baseline_file) and list(customization_baseline_file)[0] \
136 and sub_state not in [1, 2]:
137 customization_baseline_file = list(customization_baseline_file)[0]
138 else:
139 customization_baseline_file = "templates/baseline.html"
140
141 login_text = policy_object.get_action_values(
142 allow_white_space_in_action=True,
143 action=ACTION.LOGIN_TEXT,
144 scope=SCOPE.WEBUI,
145 client=client_ip, unique=True
146 )
147 if len(login_text) and list(login_text)[0] and sub_state not in [1, 2]:
148 login_text = list(login_text)[0]
149 else:
150 login_text = ""
151
152 return render_template("index.html", instance=instance,
153 backendUrl=backend_url,
154 browser_lang=browser_lang,
155 remote_user=remote_user,
156 theme=theme,
157 password_reset=password_reset,
158 hsm_ready=hsm_ready,
159 has_job_queue=str(has_job_queue()),
160 customization=customization,
161 customization_menu_file=customization_menu_file,
162 customization_baseline_file=customization_baseline_file,
163 realms=realms,
164 external_links=external_links,
165 login_text=login_text,
166 logo=logo)
167
168
[end of privacyidea/webui/login.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/privacyidea/webui/login.py b/privacyidea/webui/login.py
--- a/privacyidea/webui/login.py
+++ b/privacyidea/webui/login.py
@@ -103,8 +103,6 @@
# The policy is still a boolean realm_dropdown action
# Thus we display ALL realms
realms = ",".join(get_realms())
- if realms:
- realms = "," + realms
try:
if is_remote_user_allowed(request):
| {"golden_diff": "diff --git a/privacyidea/webui/login.py b/privacyidea/webui/login.py\n--- a/privacyidea/webui/login.py\n+++ b/privacyidea/webui/login.py\n@@ -103,8 +103,6 @@\n # The policy is still a boolean realm_dropdown action\n # Thus we display ALL realms\n realms = \",\".join(get_realms())\n- if realms:\n- realms = \",\" + realms\n \n try:\n if is_remote_user_allowed(request):\n", "issue": "Realm-Select box with broken \"placeholder\"\nIn the login screen there is a realm select box.\r\n\r\nThe placeholder for the select box does not work:\r\nhttps://github.com/privacyidea/privacyidea/blob/master/privacyidea/static/components/login/views/login.html#L63\r\n\r\nWe could either fix the placeholder or preselect the default realm.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# http://www.privacyidea.org\n# (c) cornelius k\u00f6lbel, privacyidea.org\n#\n# 2017-11-14 Cornelius K\u00f6lbel <[email protected]>\n# Add custom baseline and menu\n# 2016-01-07 Cornelius K\u00f6lbel <[email protected]>\n# Add password reset\n# 2015-11-04 Cornelius K\u00f6lbel <[email protected]>\n# Add REMOTE_USER check\n# 2014-12-22 Cornelius K\u00f6lbel, <[email protected]>\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nfrom privacyidea.lib.queue import has_job_queue\n\n__doc__ = \"\"\"This is the starting point for the single web application.\nOther html code is dynamically loaded via angularJS and located in\n/static/views/...\n\"\"\"\n__author__ = \"Cornelius K\u00f6lbel <[email protected]>\"\n\nfrom flask import (Blueprint, render_template, request,\n current_app)\nfrom privacyidea.api.lib.prepolicy import is_remote_user_allowed\nfrom privacyidea.lib.passwordreset import is_password_reset\nfrom privacyidea.lib.error import HSMException\nfrom privacyidea.lib.realm import get_realms\nfrom privacyidea.lib.policy import PolicyClass, ACTION, SCOPE\nfrom privacyidea.lib.subscriptions import subscription_status\nfrom privacyidea.lib.utils import get_client_ip\nfrom privacyidea.lib.config import get_from_config, SYSCONF\n\nDEFAULT_THEME = \"/static/contrib/css/bootstrap-theme.css\"\n\nlogin_blueprint = Blueprint('login_blueprint', __name__)\n\n\n@login_blueprint.route('/', methods=['GET'])\ndef single_page_application():\n instance = request.script_root\n if instance == \"/\":\n instance = \"\"\n # The backend URL should come from the configuration of the system.\n backend_url = \"\"\n\n if current_app.config.get(\"PI_UI_DEACTIVATED\"):\n # Do not provide the UI\n return render_template(\"deactivated.html\")\n\n # The default theme. 
We can change this later\n theme = current_app.config.get(\"PI_CSS\", DEFAULT_THEME)\n # Get further customizations\n customization = current_app.config.get(\"PI_CUSTOMIZATION\",\n \"/static/customize/\")\n customization = customization.strip('/')\n # TODO: we should add the CSS into PI_CUSTOMZATION/css\n # Enrollment-Wizard:\n # PI_CUSTOMIZATION/views/includes/token.enroll.pre.top.html\n # PI_CUSTOMIZATION/views/includes/token.enroll.pre.bottom.html\n # PI_CUSTOMIZATION/views/includes/token.enroll.post.top.html\n # PI_CUSTOMIZATION/views/includes/token.enroll.post.bottom.html\n # Get the hidden external links\n external_links = current_app.config.get(\"PI_EXTERNAL_LINKS\", True)\n # Get the logo file\n logo = current_app.config.get(\"PI_LOGO\", \"privacyIDEA1.png\")\n browser_lang = request.accept_languages.best_match([\"en\", \"de\", \"de-DE\"], default=\"en\").split(\"-\")[0]\n # check if login with REMOTE_USER is allowed.\n remote_user = \"\"\n password_reset = False\n if not hasattr(request, \"all_data\"):\n request.all_data = {}\n # Depending on displaying the realm dropdown, we fill realms or not.\n policy_object = PolicyClass()\n realms = \"\"\n client_ip = get_client_ip(request,\n get_from_config(SYSCONF.OVERRIDECLIENT))\n realm_dropdown = policy_object.get_policies(action=ACTION.REALMDROPDOWN,\n scope=SCOPE.WEBUI,\n client=client_ip,\n active=True)\n if realm_dropdown:\n try:\n realm_dropdown_values = policy_object.get_action_values(\n action=ACTION.REALMDROPDOWN,\n scope=SCOPE.WEBUI,\n client=client_ip)\n # Use the realms from the policy.\n realms = \",\".join(realm_dropdown_values)\n except AttributeError as ex:\n # The policy is still a boolean realm_dropdown action\n # Thus we display ALL realms\n realms = \",\".join(get_realms())\n if realms:\n realms = \",\" + realms\n\n try:\n if is_remote_user_allowed(request):\n remote_user = request.remote_user\n password_reset = is_password_reset()\n hsm_ready = True\n except HSMException:\n hsm_ready = False\n\n # Use policies to determine the customization of menu\n # and baseline. 
get_action_values returns an array!\n sub_state = subscription_status()\n customization_menu_file = policy_object.get_action_values(\n allow_white_space_in_action=True,\n action=ACTION.CUSTOM_MENU,\n scope=SCOPE.WEBUI,\n client=client_ip, unique=True)\n if len(customization_menu_file) and list(customization_menu_file)[0] \\\n and sub_state not in [1, 2]:\n customization_menu_file = list(customization_menu_file)[0]\n else:\n customization_menu_file = \"templates/menu.html\"\n customization_baseline_file = policy_object.get_action_values(\n allow_white_space_in_action=True,\n action=ACTION.CUSTOM_BASELINE,\n scope=SCOPE.WEBUI,\n client=client_ip, unique=True)\n if len(customization_baseline_file) and list(customization_baseline_file)[0] \\\n and sub_state not in [1, 2]:\n customization_baseline_file = list(customization_baseline_file)[0]\n else:\n customization_baseline_file = \"templates/baseline.html\"\n\n login_text = policy_object.get_action_values(\n allow_white_space_in_action=True,\n action=ACTION.LOGIN_TEXT,\n scope=SCOPE.WEBUI,\n client=client_ip, unique=True\n )\n if len(login_text) and list(login_text)[0] and sub_state not in [1, 2]:\n login_text = list(login_text)[0]\n else:\n login_text = \"\"\n\n return render_template(\"index.html\", instance=instance,\n backendUrl=backend_url,\n browser_lang=browser_lang,\n remote_user=remote_user,\n theme=theme,\n password_reset=password_reset,\n hsm_ready=hsm_ready,\n has_job_queue=str(has_job_queue()),\n customization=customization,\n customization_menu_file=customization_menu_file,\n customization_baseline_file=customization_baseline_file,\n realms=realms,\n external_links=external_links,\n login_text=login_text,\n logo=logo)\n\n", "path": "privacyidea/webui/login.py"}]} | 2,534 | 108 |
gh_patches_debug_57587 | rasdani/github-patches | git_diff | joke2k__faker-262 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
en_US SSN provider generates invalid SSNs
There are a few limitations on United States' SSNs that prevent them from being completely random.
- No group can be all 0s
- The SSN cannot start with 666
- The SSN cannot start with a number >= 900
See http://www.ssa.gov/employer/randomization.html
Could you modify the SSN generator to avoid these issues?
</issue>
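For context, a generator honoring those rules only needs to constrain the three groups independently; a minimal sketch (not Faker's actual implementation) is:

```python
import random


def random_us_ssn() -> str:
    # Area: 001-899 excluding 666; group: 01-99; serial: 0001-9999.
    area = random.choice([n for n in range(1, 900) if n != 666])
    group = random.randint(1, 99)
    serial = random.randint(1, 9999)
    return "{:03d}-{:02d}-{:04d}".format(area, group, serial)
```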
<code>
[start of faker/providers/ssn/en_US/__init__.py]
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from .. import Provider as BaseProvider
4
5
6 class Provider(BaseProvider):
7 pass
8
[end of faker/providers/ssn/en_US/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/ssn/en_US/__init__.py b/faker/providers/ssn/en_US/__init__.py
--- a/faker/providers/ssn/en_US/__init__.py
+++ b/faker/providers/ssn/en_US/__init__.py
@@ -4,4 +4,17 @@
class Provider(BaseProvider):
- pass
+
+ @classmethod
+ def ssn(cls):
+ # Certain numbers are invalid for U.S. SSNs. The area (first 3 digits)
+ # cannot be 666 or 900-999. The group number (middle digits) cannot be
+ # 00. The serial (last 4 digits) cannot be 0000
+ area = BaseProvider.random_int(min=1, max=899)
+ if area == 666:
+ area += 1
+ group = BaseProvider.random_int(1, 99)
+ serial = BaseProvider.random_int(1, 9999)
+
+ ssn = "{0:03d}-{1:02d}-{2:04d}".format(area, group, serial)
+ return ssn
| {"golden_diff": "diff --git a/faker/providers/ssn/en_US/__init__.py b/faker/providers/ssn/en_US/__init__.py\n--- a/faker/providers/ssn/en_US/__init__.py\n+++ b/faker/providers/ssn/en_US/__init__.py\n@@ -4,4 +4,17 @@\n \n \n class Provider(BaseProvider):\n- pass\n+\n+ @classmethod\n+ def ssn(cls):\n+ # Certain numbers are invalid for U.S. SSNs. The area (first 3 digits)\n+ # cannot be 666 or 900-999. The group number (middle digits) cannot be\n+ # 00. The serial (last 4 digits) cannot be 0000\n+ area = BaseProvider.random_int(min=1, max=899)\n+ if area == 666:\n+ area += 1\n+ group = BaseProvider.random_int(1, 99)\n+ serial = BaseProvider.random_int(1, 9999)\n+\n+ ssn = \"{0:03d}-{1:02d}-{2:04d}\".format(area, group, serial)\n+ return ssn\n", "issue": "en_US SSN provider generates invalid SSNs\nThere's a few limitations on United States' SSNs that prevent it from being completely random.\n- No group can be all 0s\n- The SSN cannot start with 666\n- The SSN cannot start with a number >= 900\n\nSee http://www.ssa.gov/employer/randomization.html\n\nCould you modify the SSN generator to avoid these issues?\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom .. import Provider as BaseProvider\n\n\nclass Provider(BaseProvider):\n pass\n", "path": "faker/providers/ssn/en_US/__init__.py"}]} | 676 | 269 |
gh_patches_debug_57390 | rasdani/github-patches | git_diff | translate__pootle-4187 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Change icons for v-folders
To better distinguish virtual folders (or "goals") from regular folders, let's use the following icon:

Preview:

</issue>
<code>
[start of pootle/core/browser.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from django.utils.translation import ugettext_lazy as _
11
12
13 HEADING_CHOICES = [
14 {
15 'id': 'name',
16 'class': 'stats',
17 'display_name': _("Name"),
18 },
19 {
20 'id': 'priority',
21 'class': 'stats-number sorttable_numeric',
22 'display_name': _("Priority"),
23 },
24 {
25 'id': 'project',
26 'class': 'stats',
27 'display_name': _("Project"),
28 },
29 {
30 'id': 'language',
31 'class': 'stats',
32 'display_name': _("Language"),
33 },
34 {
35 'id': 'progress',
36 'class': 'stats',
37 # Translators: noun. The graphical representation of translation status
38 'display_name': _("Progress"),
39 },
40 {
41 'id': 'total',
42 'class': 'stats-number sorttable_numeric when-loaded',
43 # Translators: Heading representing the total number of words of a file
44 # or directory
45 'display_name': _("Total"),
46 },
47 {
48 'id': 'last-updated',
49 'class': 'stats sorttable_numeric when-loaded',
50 'display_name': _("Last updated"),
51 },
52 {
53 'id': 'need-translation',
54 'class': 'stats-number sorttable_numeric when-loaded',
55 'display_name': _("Need Translation"),
56 },
57 {
58 'id': 'suggestions',
59 'class': 'stats-number sorttable_numeric when-loaded',
60 # Translators: The number of suggestions pending review
61 'display_name': _("Suggestions"),
62 },
63 {
64 'id': 'critical',
65 'class': 'stats-number sorttable_numeric when-loaded',
66 'display_name': _("Critical"),
67 },
68 {
69 'id': 'activity',
70 'class': 'stats sorttable_numeric when-loaded',
71 'display_name': _("Last Activity"),
72 },
73 ]
74
75
76 def get_table_headings(choices):
77 """Filters the list of available table headings to the given `choices`."""
78 return filter(lambda x: x['id'] in choices, HEADING_CHOICES)
79
80
81 def make_generic_item(path_obj, **kwargs):
82 """Template variables for each row in the table."""
83 return {
84 'href': path_obj.get_absolute_url(),
85 'href_all': path_obj.get_translate_url(),
86 'href_todo': path_obj.get_translate_url(state='incomplete', **kwargs),
87 'href_sugg': path_obj.get_translate_url(state='suggestions', **kwargs),
88 'href_critical': path_obj.get_critical_url(**kwargs),
89 'title': path_obj.name,
90 'code': path_obj.code,
91 'is_disabled': getattr(path_obj, 'disabled', False),
92 }
93
94
95 def make_directory_item(directory):
96 filters = {}
97
98 if directory.has_vfolders:
99 # The directory has virtual folders, so append priority sorting to URL.
100 filters['sort'] = 'priority'
101
102 item = make_generic_item(directory, **filters)
103 item.update({
104 'icon': 'folder',
105 })
106 return item
107
108
109 def make_store_item(store):
110 item = make_generic_item(store)
111 item.update({
112 'icon': 'file',
113 })
114 return item
115
116
117 def get_parent(path_obj):
118 """Retrieves a representation of the parent object.
119
120 :param path_obj: either a `Directory` or Store` instance.
121 """
122 parent_dir = path_obj.parent
123
124 if parent_dir.is_project():
125 return None
126
127 if parent_dir.is_language():
128 label = _('Back to language')
129 else:
130 label = _('Back to parent folder')
131
132 return {
133 'title': label,
134 'href': parent_dir.get_absolute_url()
135 }
136
137
138 def make_project_item(translation_project):
139 item = make_generic_item(translation_project)
140 item.update({
141 'icon': 'project',
142 'title': translation_project.project.name,
143 })
144 return item
145
146
147 def make_language_item(translation_project):
148 item = make_generic_item(translation_project)
149 item.update({
150 'icon': 'language',
151 'title': translation_project.language.name,
152 })
153 return item
154
155
156 def make_xlanguage_item(resource_obj):
157 translation_project = resource_obj.translation_project
158 item = make_generic_item(resource_obj)
159 item.update({
160 'icon': 'language',
161 'code': translation_project.language.code,
162 'title': translation_project.language.name,
163 })
164 return item
165
166
167 def make_project_list_item(project):
168 item = make_generic_item(project)
169 item.update({
170 'icon': 'project',
171 'title': project.fullname,
172 })
173 return item
174
175
176 def get_children(directory):
177 """Returns a list of children directories and stores for this
178 ``directory``.
179
180 The elements of the list are dictionaries which keys are populated after
181 in the templates.
182 """
183 directories = [make_directory_item(child_dir)
184 for child_dir in directory.child_dirs.live().iterator()]
185
186 stores = [make_store_item(child_store)
187 for child_store in directory.child_stores.live().iterator()]
188
189 return directories + stores
190
191
192 def make_vfolder_treeitem(vfolder_treeitem):
193 return {
194 'href_all': vfolder_treeitem.get_translate_url(),
195 'href_todo': vfolder_treeitem.get_translate_url(state='incomplete'),
196 'href_sugg': vfolder_treeitem.get_translate_url(state='suggestions'),
197 'href_critical': vfolder_treeitem.get_critical_url(),
198 'title': vfolder_treeitem.vfolder.name,
199 'code': vfolder_treeitem.code,
200 'priority': vfolder_treeitem.vfolder.priority,
201 'is_grayed': not vfolder_treeitem.is_visible,
202 'icon': 'folder',
203 }
204
205
206 def get_vfolders(directory, all_vfolders=False):
207 """Return a list of virtual folders for this ``directory``.
208
209 The elements of the list are dictionaries which keys are populated after
210 in the templates.
211
212 If ``all_vfolders`` is True then all the virtual folders matching the
213 provided directory are returned. If not only the visible ones are returned.
214 """
215 return [make_vfolder_treeitem(vfolder_treeitem)
216 for vfolder_treeitem
217 in directory.vf_treeitems.order_by('-vfolder__priority').iterator()
218 if all_vfolders or vfolder_treeitem.is_visible]
219
[end of pootle/core/browser.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/core/browser.py b/pootle/core/browser.py
--- a/pootle/core/browser.py
+++ b/pootle/core/browser.py
@@ -199,7 +199,7 @@
'code': vfolder_treeitem.code,
'priority': vfolder_treeitem.vfolder.priority,
'is_grayed': not vfolder_treeitem.is_visible,
- 'icon': 'folder',
+ 'icon': 'vfolder',
}
| {"golden_diff": "diff --git a/pootle/core/browser.py b/pootle/core/browser.py\n--- a/pootle/core/browser.py\n+++ b/pootle/core/browser.py\n@@ -199,7 +199,7 @@\n 'code': vfolder_treeitem.code,\n 'priority': vfolder_treeitem.vfolder.priority,\n 'is_grayed': not vfolder_treeitem.is_visible,\n- 'icon': 'folder',\n+ 'icon': 'vfolder',\n }\n", "issue": "Change icons for v-folders\nTo better distinguish virtual folders (or \"goals\") from regular folders, let's use the following icon:\n\n\n\nPreview:\n\n\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.utils.translation import ugettext_lazy as _\n\n\nHEADING_CHOICES = [\n {\n 'id': 'name',\n 'class': 'stats',\n 'display_name': _(\"Name\"),\n },\n {\n 'id': 'priority',\n 'class': 'stats-number sorttable_numeric',\n 'display_name': _(\"Priority\"),\n },\n {\n 'id': 'project',\n 'class': 'stats',\n 'display_name': _(\"Project\"),\n },\n {\n 'id': 'language',\n 'class': 'stats',\n 'display_name': _(\"Language\"),\n },\n {\n 'id': 'progress',\n 'class': 'stats',\n # Translators: noun. The graphical representation of translation status\n 'display_name': _(\"Progress\"),\n },\n {\n 'id': 'total',\n 'class': 'stats-number sorttable_numeric when-loaded',\n # Translators: Heading representing the total number of words of a file\n # or directory\n 'display_name': _(\"Total\"),\n },\n {\n 'id': 'last-updated',\n 'class': 'stats sorttable_numeric when-loaded',\n 'display_name': _(\"Last updated\"),\n },\n {\n 'id': 'need-translation',\n 'class': 'stats-number sorttable_numeric when-loaded',\n 'display_name': _(\"Need Translation\"),\n },\n {\n 'id': 'suggestions',\n 'class': 'stats-number sorttable_numeric when-loaded',\n # Translators: The number of suggestions pending review\n 'display_name': _(\"Suggestions\"),\n },\n {\n 'id': 'critical',\n 'class': 'stats-number sorttable_numeric when-loaded',\n 'display_name': _(\"Critical\"),\n },\n {\n 'id': 'activity',\n 'class': 'stats sorttable_numeric when-loaded',\n 'display_name': _(\"Last Activity\"),\n },\n]\n\n\ndef get_table_headings(choices):\n \"\"\"Filters the list of available table headings to the given `choices`.\"\"\"\n return filter(lambda x: x['id'] in choices, HEADING_CHOICES)\n\n\ndef make_generic_item(path_obj, **kwargs):\n \"\"\"Template variables for each row in the table.\"\"\"\n return {\n 'href': path_obj.get_absolute_url(),\n 'href_all': path_obj.get_translate_url(),\n 'href_todo': path_obj.get_translate_url(state='incomplete', **kwargs),\n 'href_sugg': path_obj.get_translate_url(state='suggestions', **kwargs),\n 'href_critical': path_obj.get_critical_url(**kwargs),\n 'title': path_obj.name,\n 'code': path_obj.code,\n 'is_disabled': getattr(path_obj, 'disabled', False),\n }\n\n\ndef make_directory_item(directory):\n filters = {}\n\n if directory.has_vfolders:\n # The directory has virtual folders, so append priority sorting to URL.\n filters['sort'] = 'priority'\n\n item = make_generic_item(directory, **filters)\n item.update({\n 'icon': 'folder',\n })\n return item\n\n\ndef make_store_item(store):\n item = make_generic_item(store)\n item.update({\n 'icon': 'file',\n })\n return item\n\n\ndef get_parent(path_obj):\n \"\"\"Retrieves a representation of the parent object.\n\n :param 
path_obj: either a `Directory` or Store` instance.\n \"\"\"\n parent_dir = path_obj.parent\n\n if parent_dir.is_project():\n return None\n\n if parent_dir.is_language():\n label = _('Back to language')\n else:\n label = _('Back to parent folder')\n\n return {\n 'title': label,\n 'href': parent_dir.get_absolute_url()\n }\n\n\ndef make_project_item(translation_project):\n item = make_generic_item(translation_project)\n item.update({\n 'icon': 'project',\n 'title': translation_project.project.name,\n })\n return item\n\n\ndef make_language_item(translation_project):\n item = make_generic_item(translation_project)\n item.update({\n 'icon': 'language',\n 'title': translation_project.language.name,\n })\n return item\n\n\ndef make_xlanguage_item(resource_obj):\n translation_project = resource_obj.translation_project\n item = make_generic_item(resource_obj)\n item.update({\n 'icon': 'language',\n 'code': translation_project.language.code,\n 'title': translation_project.language.name,\n })\n return item\n\n\ndef make_project_list_item(project):\n item = make_generic_item(project)\n item.update({\n 'icon': 'project',\n 'title': project.fullname,\n })\n return item\n\n\ndef get_children(directory):\n \"\"\"Returns a list of children directories and stores for this\n ``directory``.\n\n The elements of the list are dictionaries which keys are populated after\n in the templates.\n \"\"\"\n directories = [make_directory_item(child_dir)\n for child_dir in directory.child_dirs.live().iterator()]\n\n stores = [make_store_item(child_store)\n for child_store in directory.child_stores.live().iterator()]\n\n return directories + stores\n\n\ndef make_vfolder_treeitem(vfolder_treeitem):\n return {\n 'href_all': vfolder_treeitem.get_translate_url(),\n 'href_todo': vfolder_treeitem.get_translate_url(state='incomplete'),\n 'href_sugg': vfolder_treeitem.get_translate_url(state='suggestions'),\n 'href_critical': vfolder_treeitem.get_critical_url(),\n 'title': vfolder_treeitem.vfolder.name,\n 'code': vfolder_treeitem.code,\n 'priority': vfolder_treeitem.vfolder.priority,\n 'is_grayed': not vfolder_treeitem.is_visible,\n 'icon': 'folder',\n }\n\n\ndef get_vfolders(directory, all_vfolders=False):\n \"\"\"Return a list of virtual folders for this ``directory``.\n\n The elements of the list are dictionaries which keys are populated after\n in the templates.\n\n If ``all_vfolders`` is True then all the virtual folders matching the\n provided directory are returned. If not only the visible ones are returned.\n \"\"\"\n return [make_vfolder_treeitem(vfolder_treeitem)\n for vfolder_treeitem\n in directory.vf_treeitems.order_by('-vfolder__priority').iterator()\n if all_vfolders or vfolder_treeitem.is_visible]\n", "path": "pootle/core/browser.py"}]} | 2,670 | 105 |
gh_patches_debug_57082 | rasdani/github-patches | git_diff | SeldonIO__MLServer-1171 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add OS constraint in PyPI
Mention MLServer's OS constraints as metadata in `setup.py` so that they become visible on pypi.org.
```
setup(...,
classifiers=[
'Operating System :: POSIX',
],
)
```
_Originally posted by @HugoMVale in https://github.com/SeldonIO/MLServer/issues/1022#issuecomment-1456788132_
</issue>
<code>
[start of setup.py]
1 import os
2
3 from typing import Dict
4 from setuptools import setup, find_packages
5
6 ROOT_PATH = os.path.dirname(__file__)
7 PKG_NAME = "mlserver"
8 PKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)
9
10
11 def _load_version() -> str:
12 version = ""
13 version_path = os.path.join(PKG_PATH, "version.py")
14 with open(version_path) as fp:
15 version_module: Dict[str, str] = {}
16 exec(fp.read(), version_module)
17 version = version_module["__version__"]
18
19 return version
20
21
22 def _load_description() -> str:
23 readme_path = os.path.join(ROOT_PATH, "README.md")
24 with open(readme_path) as fp:
25 return fp.read()
26
27
28 env_marker_cpython = (
29 "sys_platform != 'win32'"
30 " and (sys_platform != 'cygwin'"
31 " and platform_python_implementation != 'PyPy')"
32 )
33
34 setup(
35 name=PKG_NAME,
36 version=_load_version(),
37 url="https://github.com/SeldonIO/MLServer.git",
38 author="Seldon Technologies Ltd.",
39 author_email="[email protected]",
40 description="ML server",
41 include_package_data=True,
42 packages=find_packages(exclude=["tests", "tests.*"]),
43 install_requires=[
44 "click",
45 # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861
46 "fastapi >=0.88.0, <=0.89.1, !=0.89.0",
47 "python-dotenv",
48 "grpcio",
49 # The importlib-resources backport is required to use some
50 # functionality added in Python 3.10
51 # https://setuptools.pypa.io/en/latest/userguide/datafiles.html#accessing-data-files-at-runtime
52 "importlib-resources",
53 "numpy",
54 "pandas",
55 "protobuf",
56 "uvicorn",
57 "starlette_exporter",
58 "py-grpc-prometheus",
59 "uvloop;" + env_marker_cpython,
60 "aiokafka",
61 "tritonclient[http]>=2.24",
62 "aiofiles",
63 "orjson",
64 ],
65 entry_points={"console_scripts": ["mlserver=mlserver.cli:main"]},
66 long_description=_load_description(),
67 long_description_content_type="text/markdown",
68 license="Apache 2.0",
69 )
70
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,6 +37,7 @@
url="https://github.com/SeldonIO/MLServer.git",
author="Seldon Technologies Ltd.",
author_email="[email protected]",
+ classifiers=["Operating System :: POSIX", "Operating System :: MacOS"],
description="ML server",
include_package_data=True,
packages=find_packages(exclude=["tests", "tests.*"]),
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,6 +37,7 @@\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n+ classifiers=[\"Operating System :: POSIX\", \"Operating System :: MacOS\"],\n description=\"ML server\",\n include_package_data=True,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n", "issue": "Add OS constraint in PyPI\nMention MLServer's OS constraints as metadata in `setup.py`, so that it becomes visible in pypi.org. \r\n\r\n```\r\nsetup(...,\r\n classifiers=[\r\n 'Operating System :: POSIX',\r\n ],\r\n )\r\n```\r\n\r\n_Originally posted by @HugoMVale in https://github.com/SeldonIO/MLServer/issues/1022#issuecomment-1456788132_\r\n \n", "before_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n include_package_data=True,\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"click\",\n # 0.89.0: https://github.com/tiangolo/fastapi/issues/5861\n \"fastapi >=0.88.0, <=0.89.1, !=0.89.0\",\n \"python-dotenv\",\n \"grpcio\",\n # The importlib-resources backport is required to use some\n # functionality added in Python 3.10\n # https://setuptools.pypa.io/en/latest/userguide/datafiles.html#accessing-data-files-at-runtime\n \"importlib-resources\",\n \"numpy\",\n \"pandas\",\n \"protobuf\",\n \"uvicorn\",\n \"starlette_exporter\",\n \"py-grpc-prometheus\",\n \"uvloop;\" + env_marker_cpython,\n \"aiokafka\",\n \"tritonclient[http]>=2.24\",\n \"aiofiles\",\n \"orjson\",\n ],\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}]} | 1,282 | 104 |
gh_patches_debug_41531 | rasdani/github-patches | git_diff | deepset-ai__haystack-7247 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docstrings - `haystack.components.caching`
</issue>
<code>
[start of haystack/components/caching/cache_checker.py]
1 from typing import List, Dict, Any
2
3 import importlib
4
5 import logging
6
7 from haystack import component, Document, default_from_dict, default_to_dict, DeserializationError
8 from haystack.document_stores.types import DocumentStore
9
10
11 logger = logging.getLogger(__name__)
12
13
14 @component
15 class CacheChecker:
16 """
17 CacheChecker is a component that checks for the presence of documents in a Document Store based on a specified
18 cache field.
19 """
20
21 def __init__(self, document_store: DocumentStore, cache_field: str):
22 """
23 Create a UrlCacheChecker component.
24 """
25 self.document_store = document_store
26 self.cache_field = cache_field
27
28 def to_dict(self) -> Dict[str, Any]:
29 """
30 Serialize this component to a dictionary.
31 """
32 return default_to_dict(self, document_store=self.document_store.to_dict(), cache_field=self.cache_field)
33
34 @classmethod
35 def from_dict(cls, data: Dict[str, Any]) -> "CacheChecker":
36 """
37 Deserialize this component from a dictionary.
38 """
39 init_params = data.get("init_parameters", {})
40 if "document_store" not in init_params:
41 raise DeserializationError("Missing 'document_store' in serialization data")
42 if "type" not in init_params["document_store"]:
43 raise DeserializationError("Missing 'type' in document store's serialization data")
44
45 try:
46 module_name, type_ = init_params["document_store"]["type"].rsplit(".", 1)
47 logger.debug("Trying to import %s", module_name)
48 module = importlib.import_module(module_name)
49 except (ImportError, DeserializationError) as e:
50 raise DeserializationError(
51 f"DocumentStore of type '{init_params['document_store']['type']}' not correctly imported"
52 ) from e
53
54 docstore_class = getattr(module, type_)
55 docstore = docstore_class.from_dict(init_params["document_store"])
56
57 data["init_parameters"]["document_store"] = docstore
58 return default_from_dict(cls, data)
59
60 @component.output_types(hits=List[Document], misses=List)
61 def run(self, items: List[Any]):
62 """
63 Checks if any document associated with the specified field is already present in the store. If matching documents
64 are found, they are returned as hits. If not, the items are returned as misses, indicating they are not in the cache.
65
66 :param items: A list of values associated with the cache_field to be checked against the cache.
67 :return: A dictionary with two keys: "hits" and "misses". The values are lists of documents that were found in
68 the cache and items that were not, respectively.
69 """
70 found_documents = []
71 misses = []
72
73 for item in items:
74 filters = {self.cache_field: item}
75 found = self.document_store.filter_documents(filters=filters)
76 if found:
77 found_documents.extend(found)
78 else:
79 misses.append(item)
80 return {"hits": found_documents, "misses": misses}
81
[end of haystack/components/caching/cache_checker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/haystack/components/caching/cache_checker.py b/haystack/components/caching/cache_checker.py
--- a/haystack/components/caching/cache_checker.py
+++ b/haystack/components/caching/cache_checker.py
@@ -14,27 +14,63 @@
@component
class CacheChecker:
"""
- CacheChecker is a component that checks for the presence of documents in a Document Store based on a specified
- cache field.
+ Checks for the presence of documents in a Document Store based on a specified
+ field in each document's metadata.
+
+ If matching documents are found, they are returned as hits. If not, the items
+ are returned as misses, indicating they are not in the cache.
+
+ Usage example:
+ ```python
+ from haystack import Document
+ from haystack.document_stores.in_memory import InMemoryDocumentStore
+ from haystack.components.caching.cache_checker import CacheChecker
+
+ docstore = InMemoryDocumentStore()
+ documents = [
+ Document(content="doc1", meta={"url": "https://example.com/1"}),
+ Document(content="doc2", meta={"url": "https://example.com/2"}),
+ Document(content="doc3", meta={"url": "https://example.com/1"}),
+ Document(content="doc4", meta={"url": "https://example.com/2"}),
+ ]
+ docstore.write_documents(documents)
+ checker = CacheChecker(docstore, cache_field="url")
+ results = checker.run(items=["https://example.com/1", "https://example.com/5"])
+ assert results == {"hits": [documents[0], documents[2]], "misses": ["https://example.com/5"]}
+ ```
"""
def __init__(self, document_store: DocumentStore, cache_field: str):
"""
- Create a UrlCacheChecker component.
+ Create a CacheChecker component.
+
+ :param document_store:
+ Document store to check.
+ :param cache_field:
+ Name of the Document metadata field
+ to check for cache hits.
"""
self.document_store = document_store
self.cache_field = cache_field
def to_dict(self) -> Dict[str, Any]:
"""
- Serialize this component to a dictionary.
+ Serializes the component to a dictionary.
+
+ :returns:
+ Dictionary with serialized data.
"""
return default_to_dict(self, document_store=self.document_store.to_dict(), cache_field=self.cache_field)
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "CacheChecker":
"""
- Deserialize this component from a dictionary.
+ Deserializes the component from a dictionary.
+
+ :param data:
+ Dictionary to deserialize from.
+ :returns:
+ Deserialized component.
"""
init_params = data.get("init_parameters", {})
if "document_store" not in init_params:
@@ -60,12 +96,15 @@
@component.output_types(hits=List[Document], misses=List)
def run(self, items: List[Any]):
"""
- Checks if any document associated with the specified field is already present in the store. If matching documents
- are found, they are returned as hits. If not, the items are returned as misses, indicating they are not in the cache.
-
- :param items: A list of values associated with the cache_field to be checked against the cache.
- :return: A dictionary with two keys: "hits" and "misses". The values are lists of documents that were found in
- the cache and items that were not, respectively.
+ Checks if any document associated with the specified cache field
+ is already present in the store.
+
+ :param items:
+ Values to be checked against the cache field.
+ :return:
+ A dictionary with two keys:
+ - `hits` - Documents that matched with any of the items.
+ - `misses` - Items that were not present in any documents.
"""
found_documents = []
misses = []
| {"golden_diff": "diff --git a/haystack/components/caching/cache_checker.py b/haystack/components/caching/cache_checker.py\n--- a/haystack/components/caching/cache_checker.py\n+++ b/haystack/components/caching/cache_checker.py\n@@ -14,27 +14,63 @@\n @component\n class CacheChecker:\n \"\"\"\n- CacheChecker is a component that checks for the presence of documents in a Document Store based on a specified\n- cache field.\n+ Checks for the presence of documents in a Document Store based on a specified\n+ field in each document's metadata.\n+\n+ If matching documents are found, they are returned as hits. If not, the items\n+ are returned as misses, indicating they are not in the cache.\n+\n+ Usage example:\n+ ```python\n+ from haystack import Document\n+ from haystack.document_stores.in_memory import InMemoryDocumentStore\n+ from haystack.components.caching.cache_checker import CacheChecker\n+\n+ docstore = InMemoryDocumentStore()\n+ documents = [\n+ Document(content=\"doc1\", meta={\"url\": \"https://example.com/1\"}),\n+ Document(content=\"doc2\", meta={\"url\": \"https://example.com/2\"}),\n+ Document(content=\"doc3\", meta={\"url\": \"https://example.com/1\"}),\n+ Document(content=\"doc4\", meta={\"url\": \"https://example.com/2\"}),\n+ ]\n+ docstore.write_documents(documents)\n+ checker = CacheChecker(docstore, cache_field=\"url\")\n+ results = checker.run(items=[\"https://example.com/1\", \"https://example.com/5\"])\n+ assert results == {\"hits\": [documents[0], documents[2]], \"misses\": [\"https://example.com/5\"]}\n+ ```\n \"\"\"\n \n def __init__(self, document_store: DocumentStore, cache_field: str):\n \"\"\"\n- Create a UrlCacheChecker component.\n+ Create a CacheChecker component.\n+\n+ :param document_store:\n+ Document store to check.\n+ :param cache_field:\n+ Name of the Document metadata field\n+ to check for cache hits.\n \"\"\"\n self.document_store = document_store\n self.cache_field = cache_field\n \n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n- Serialize this component to a dictionary.\n+ Serializes the component to a dictionary.\n+\n+ :returns:\n+ Dictionary with serialized data.\n \"\"\"\n return default_to_dict(self, document_store=self.document_store.to_dict(), cache_field=self.cache_field)\n \n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"CacheChecker\":\n \"\"\"\n- Deserialize this component from a dictionary.\n+ Deserializes the component from a dictionary.\n+\n+ :param data:\n+ Dictionary to deserialize from.\n+ :returns:\n+ Deserialized component.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if \"document_store\" not in init_params:\n@@ -60,12 +96,15 @@\n @component.output_types(hits=List[Document], misses=List)\n def run(self, items: List[Any]):\n \"\"\"\n- Checks if any document associated with the specified field is already present in the store. If matching documents\n- are found, they are returned as hits. If not, the items are returned as misses, indicating they are not in the cache.\n-\n- :param items: A list of values associated with the cache_field to be checked against the cache.\n- :return: A dictionary with two keys: \"hits\" and \"misses\". 
The values are lists of documents that were found in\n- the cache and items that were not, respectively.\n+ Checks if any document associated with the specified cache field\n+ is already present in the store.\n+\n+ :param items:\n+ Values to be checked against the cache field.\n+ :return:\n+ A dictionary with two keys:\n+ - `hits` - Documents that matched with any of the items.\n+ - `misses` - Items that were not present in any documents.\n \"\"\"\n found_documents = []\n misses = []\n", "issue": "Docstrings - `haystack.components.caching`\n\n", "before_files": [{"content": "from typing import List, Dict, Any\n\nimport importlib\n\nimport logging\n\nfrom haystack import component, Document, default_from_dict, default_to_dict, DeserializationError\nfrom haystack.document_stores.types import DocumentStore\n\n\nlogger = logging.getLogger(__name__)\n\n\n@component\nclass CacheChecker:\n \"\"\"\n CacheChecker is a component that checks for the presence of documents in a Document Store based on a specified\n cache field.\n \"\"\"\n\n def __init__(self, document_store: DocumentStore, cache_field: str):\n \"\"\"\n Create a UrlCacheChecker component.\n \"\"\"\n self.document_store = document_store\n self.cache_field = cache_field\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n \"\"\"\n return default_to_dict(self, document_store=self.document_store.to_dict(), cache_field=self.cache_field)\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> \"CacheChecker\":\n \"\"\"\n Deserialize this component from a dictionary.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if \"document_store\" not in init_params:\n raise DeserializationError(\"Missing 'document_store' in serialization data\")\n if \"type\" not in init_params[\"document_store\"]:\n raise DeserializationError(\"Missing 'type' in document store's serialization data\")\n\n try:\n module_name, type_ = init_params[\"document_store\"][\"type\"].rsplit(\".\", 1)\n logger.debug(\"Trying to import %s\", module_name)\n module = importlib.import_module(module_name)\n except (ImportError, DeserializationError) as e:\n raise DeserializationError(\n f\"DocumentStore of type '{init_params['document_store']['type']}' not correctly imported\"\n ) from e\n\n docstore_class = getattr(module, type_)\n docstore = docstore_class.from_dict(init_params[\"document_store\"])\n\n data[\"init_parameters\"][\"document_store\"] = docstore\n return default_from_dict(cls, data)\n\n @component.output_types(hits=List[Document], misses=List)\n def run(self, items: List[Any]):\n \"\"\"\n Checks if any document associated with the specified field is already present in the store. If matching documents\n are found, they are returned as hits. If not, the items are returned as misses, indicating they are not in the cache.\n\n :param items: A list of values associated with the cache_field to be checked against the cache.\n :return: A dictionary with two keys: \"hits\" and \"misses\". The values are lists of documents that were found in\n the cache and items that were not, respectively.\n \"\"\"\n found_documents = []\n misses = []\n\n for item in items:\n filters = {self.cache_field: item}\n found = self.document_store.filter_documents(filters=filters)\n if found:\n found_documents.extend(found)\n else:\n misses.append(item)\n return {\"hits\": found_documents, \"misses\": misses}\n", "path": "haystack/components/caching/cache_checker.py"}]} | 1,347 | 899 |
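
The essential behaviour of `CacheChecker.run()` above — partitioning requested values into cache hits and misses by a metadata field — can be illustrated without Haystack at all. The sketch below models the document store as a plain list of dicts; the function name and dict layout are assumptions made for the example, not Haystack APIs.

```python
from typing import Any, Dict, List, Tuple


def partition_cache(
    documents: List[Dict[str, Any]], cache_field: str, items: List[Any]
) -> Tuple[List[Dict[str, Any]], List[Any]]:
    """Split `items` into documents already cached ("hits") and values not cached ("misses")."""
    hits: List[Dict[str, Any]] = []
    misses: List[Any] = []
    for item in items:
        # A hit is any document whose metadata value under `cache_field` equals the item.
        found = [d for d in documents if d.get("meta", {}).get(cache_field) == item]
        if found:
            hits.extend(found)
        else:
            misses.append(item)
    return hits, misses


if __name__ == "__main__":
    docs = [
        {"content": "doc1", "meta": {"url": "https://example.com/1"}},
        {"content": "doc2", "meta": {"url": "https://example.com/2"}},
    ]
    hits, misses = partition_cache(docs, "url", ["https://example.com/1", "https://example.com/5"])
    print(hits)    # the first document is returned as a hit
    print(misses)  # ['https://example.com/5']
```
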
gh_patches_debug_19709 | rasdani/github-patches | git_diff | fossasia__open-event-server-5615 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to login to the Admin Panel.
**Description:**
When we try to log in to the admin panel, even if the credentials belong to a super-admin, it returns "Credential incorrect"
**Steps to reproduce the behavior:**
1. Go to 127.0.0.1:5000/admin
2. Enter the admin credentials
3. Click on login button
4. See error "Credential incorrect"
**Expected Behaviour:**
It should log the user in to the admin panel if the credentials are correct and the user is an admin.
**Screenshots**

</issue>
<code>
[start of app/views/__init__.py]
1 import flask_login as login
2 import requests
3 from flask import url_for, redirect, Blueprint, request, make_response
4 from flask_admin import Admin, AdminIndexView, expose, helpers as admin_helpers
5 from flask_admin.contrib.sqla import ModelView
6 from flask_scrypt import generate_password_hash
7 from wtforms import form, fields, validators
8
9 from app.models import db
10 from app.models.user import User
11
12
13 class AdminModelView(ModelView):
14 def is_accessible(self):
15 return login.current_user.is_authenticated
16
17 def inaccessible_callback(self, name, **kwargs):
18 # redirect to login page if user doesn't have access
19 return redirect(url_for('admin.index', next=request.url))
20
21
22 class LoginForm(form.Form):
23 login = fields.TextField(validators=[validators.required(), validators.email()], render_kw={"placeholder": "[email protected]"})
24 password = fields.PasswordField(validators=[validators.required()], render_kw={"placeholder": "xyzzy"})
25
26 def validate_login(self, field):
27 """
28 validate login
29 :param field:
30 :return:
31 """
32 user = self.get_user()
33
34 if user is None:
35 raise validators.ValidationError('User does not exist.')
36
37 if user.password != generate_password_hash(self.password.data, user.salt):
38 raise validators.ValidationError('Credentials incorrect.')
39
40 if not user.is_admin and not user.is_super_admin:
41 raise validators.ValidationError('Access Forbidden. Admin Rights Required')
42
43 def get_user(self):
44 return User.query.filter_by(email=self.login.data).first()
45
46
47 class MyAdminIndexView(AdminIndexView):
48 @expose('/')
49 def index(self):
50 """
51 /admin
52 :return:
53 """
54 if not login.current_user.is_authenticated:
55 return redirect(url_for('.login_view'))
56 return super(MyAdminIndexView, self).index()
57
58 @expose('/login/', methods=('GET', 'POST'))
59 def login_view(self):
60 """
61 login view for flask-admin
62 :return:
63 """
64 # handle user login
65 form = LoginForm(request.form)
66 if admin_helpers.validate_form_on_submit(form):
67 user = form.get_user()
68 login.login_user(user)
69
70 if login.current_user.is_authenticated:
71 return redirect(url_for('.index'))
72 self._template_args['form'] = form
73 return super(MyAdminIndexView, self).index()
74
75 @expose('/logout/')
76 def logout_view(self):
77 login.logout_user()
78 return redirect(url_for('.index'))
79
80
81 home_routes = Blueprint('home', __name__)
82
83
84 # Flask views
85 @home_routes.route('/')
86 def index():
87 """
88 Index route
89 :return:
90 """
91 r = requests.get('https://raw.githubusercontent.com/fossasia/open-event-server/gh-pages/api/v1/index.html')
92 response = make_response(r.content)
93 response.headers["Content-Type"] = "text/html"
94 return response
95
96
97 class BlueprintsManager:
98 def __init__(self):
99 pass
100
101 @staticmethod
102 def register(app):
103 """
104 Register blueprints
105 :param app: a flask app instance
106 :return:
107 """
108 app.register_blueprint(home_routes)
109 admin = Admin(app, name='Open Event API', template_mode='bootstrap3', index_view=MyAdminIndexView(),
110 base_template='admin_base.html')
111
112 # Get all the models in the db, all models should have a explicit __tablename__
113 classes, models, table_names = [], [], []
114 # noinspection PyProtectedMember
115 for class_ in list(db.Model._decl_class_registry.values()):
116 try:
117 table_names.append(class_.__tablename__)
118 classes.append(class_)
119 except:
120 pass
121 for table in list(db.metadata.tables.items()):
122 if table[0] in table_names:
123 models.append(classes[table_names.index(table[0])])
124
125 for model in models:
126 admin.add_view(AdminModelView(model, db.session))
127
[end of app/views/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/views/__init__.py b/app/views/__init__.py
--- a/app/views/__init__.py
+++ b/app/views/__init__.py
@@ -3,7 +3,6 @@
from flask import url_for, redirect, Blueprint, request, make_response
from flask_admin import Admin, AdminIndexView, expose, helpers as admin_helpers
from flask_admin.contrib.sqla import ModelView
-from flask_scrypt import generate_password_hash
from wtforms import form, fields, validators
from app.models import db
@@ -34,7 +33,7 @@
if user is None:
raise validators.ValidationError('User does not exist.')
- if user.password != generate_password_hash(self.password.data, user.salt):
+ if not user.is_correct_password(self.password.data):
raise validators.ValidationError('Credentials incorrect.')
if not user.is_admin and not user.is_super_admin:
| {"golden_diff": "diff --git a/app/views/__init__.py b/app/views/__init__.py\n--- a/app/views/__init__.py\n+++ b/app/views/__init__.py\n@@ -3,7 +3,6 @@\n from flask import url_for, redirect, Blueprint, request, make_response\n from flask_admin import Admin, AdminIndexView, expose, helpers as admin_helpers\n from flask_admin.contrib.sqla import ModelView\n-from flask_scrypt import generate_password_hash\n from wtforms import form, fields, validators\n \n from app.models import db\n@@ -34,7 +33,7 @@\n if user is None:\n raise validators.ValidationError('User does not exist.')\n \n- if user.password != generate_password_hash(self.password.data, user.salt):\n+ if not user.is_correct_password(self.password.data):\n raise validators.ValidationError('Credentials incorrect.')\n \n if not user.is_admin and not user.is_super_admin:\n", "issue": "Unable to login to the Admin Panel.\n**Description:**\r\nWhen we try to login to admin panel, even if the credentials belong to super-admin, it returns \"Credential incorrect\"\r\n\r\n**Steps to reproduce the behavior:**\r\n1. Go to 127.0.0.1:5000/admin\r\n2. Enter the admin credentials\r\n3. Click on login button\r\n4. See error \"Credential incorrect\"\r\n\r\n**Expected Behaviour:**\r\nIt should login the user to the admin panel if credentials are correct and the user is an admin.\r\n\r\n**Screenshots**\r\n\r\n\n", "before_files": [{"content": "import flask_login as login\nimport requests\nfrom flask import url_for, redirect, Blueprint, request, make_response\nfrom flask_admin import Admin, AdminIndexView, expose, helpers as admin_helpers\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_scrypt import generate_password_hash\nfrom wtforms import form, fields, validators\n\nfrom app.models import db\nfrom app.models.user import User\n\n\nclass AdminModelView(ModelView):\n def is_accessible(self):\n return login.current_user.is_authenticated\n\n def inaccessible_callback(self, name, **kwargs):\n # redirect to login page if user doesn't have access\n return redirect(url_for('admin.index', next=request.url))\n\n\nclass LoginForm(form.Form):\n login = fields.TextField(validators=[validators.required(), validators.email()], render_kw={\"placeholder\": \"[email protected]\"})\n password = fields.PasswordField(validators=[validators.required()], render_kw={\"placeholder\": \"xyzzy\"})\n\n def validate_login(self, field):\n \"\"\"\n validate login\n :param field:\n :return:\n \"\"\"\n user = self.get_user()\n\n if user is None:\n raise validators.ValidationError('User does not exist.')\n\n if user.password != generate_password_hash(self.password.data, user.salt):\n raise validators.ValidationError('Credentials incorrect.')\n\n if not user.is_admin and not user.is_super_admin:\n raise validators.ValidationError('Access Forbidden. 
Admin Rights Required')\n\n def get_user(self):\n return User.query.filter_by(email=self.login.data).first()\n\n\nclass MyAdminIndexView(AdminIndexView):\n @expose('/')\n def index(self):\n \"\"\"\n /admin\n :return:\n \"\"\"\n if not login.current_user.is_authenticated:\n return redirect(url_for('.login_view'))\n return super(MyAdminIndexView, self).index()\n\n @expose('/login/', methods=('GET', 'POST'))\n def login_view(self):\n \"\"\"\n login view for flask-admin\n :return:\n \"\"\"\n # handle user login\n form = LoginForm(request.form)\n if admin_helpers.validate_form_on_submit(form):\n user = form.get_user()\n login.login_user(user)\n\n if login.current_user.is_authenticated:\n return redirect(url_for('.index'))\n self._template_args['form'] = form\n return super(MyAdminIndexView, self).index()\n\n @expose('/logout/')\n def logout_view(self):\n login.logout_user()\n return redirect(url_for('.index'))\n\n\nhome_routes = Blueprint('home', __name__)\n\n\n# Flask views\n@home_routes.route('/')\ndef index():\n \"\"\"\n Index route\n :return:\n \"\"\"\n r = requests.get('https://raw.githubusercontent.com/fossasia/open-event-server/gh-pages/api/v1/index.html')\n response = make_response(r.content)\n response.headers[\"Content-Type\"] = \"text/html\"\n return response\n\n\nclass BlueprintsManager:\n def __init__(self):\n pass\n\n @staticmethod\n def register(app):\n \"\"\"\n Register blueprints\n :param app: a flask app instance\n :return:\n \"\"\"\n app.register_blueprint(home_routes)\n admin = Admin(app, name='Open Event API', template_mode='bootstrap3', index_view=MyAdminIndexView(),\n base_template='admin_base.html')\n\n # Get all the models in the db, all models should have a explicit __tablename__\n classes, models, table_names = [], [], []\n # noinspection PyProtectedMember\n for class_ in list(db.Model._decl_class_registry.values()):\n try:\n table_names.append(class_.__tablename__)\n classes.append(class_)\n except:\n pass\n for table in list(db.metadata.tables.items()):\n if table[0] in table_names:\n models.append(classes[table_names.index(table[0])])\n\n for model in models:\n admin.add_view(AdminModelView(model, db.session))\n", "path": "app/views/__init__.py"}]} | 1,823 | 193 |
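
The accepted fix for this record replaces the manual `user.password != generate_password_hash(...)` comparison with a call to `user.is_correct_password(...)` on the model. The project's real method presumably wraps its own hashing backend; the sketch below is only a generic stand-in (PBKDF2 plus a constant-time comparison) to show the pattern of verifying through a dedicated helper instead of re-hashing and comparing with `!=`.

```python
import hashlib
import hmac
import os


def hash_password(password: str, salt: bytes, iterations: int = 200_000) -> bytes:
    """Derive a password hash with PBKDF2-HMAC-SHA256 (stand-in for the real backend)."""
    return hashlib.pbkdf2_hmac("sha256", password.encode("utf-8"), salt, iterations)


def is_correct_password(stored_hash: bytes, salt: bytes, candidate: str) -> bool:
    """Re-derive the hash for the candidate and compare in constant time."""
    return hmac.compare_digest(stored_hash, hash_password(candidate, salt))


if __name__ == "__main__":
    salt = os.urandom(16)
    stored = hash_password("xyzzy", salt)
    print(is_correct_password(stored, salt, "xyzzy"))  # True
    print(is_correct_password(stored, salt, "wrong"))  # False
```
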
gh_patches_debug_34291 | rasdani/github-patches | git_diff | deepset-ai__haystack-2908 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DPR training is broken
**Describe the bug**
It seems that DPR training does not work at the moment. I suspect that this bug was introduced by #2703.
**Error message**
```
File "/Users/bogdan/Repositories/haystack/tutorials/Tutorial9_DPR_training.py", line 92, in <module>
tutorial9_dpr_training()
File "/Users/bogdan/Repositories/haystack/tutorials/Tutorial9_DPR_training.py", line 71, in tutorial9_dpr_training
retriever.train(
File "/Users/bogdan/Repositories/haystack/haystack/nodes/retriever/dense.py", line 680, in train
trainer.train()
File "/Users/bogdan/Repositories/haystack/haystack/modeling/training/base.py", line 290, in train
loss = self.compute_loss(batch, step)
File "/Users/bogdan/Repositories/haystack/haystack/modeling/training/base.py", line 374, in compute_loss
logits = self.model.forward(**batch)
TypeError: forward() got an unexpected keyword argument 'label_ids'
```
**To Reproduce**
Execute Tutorial 9.
</issue>
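
The traceback shows `forward()` rejecting an extra `label_ids` key when the whole batch dict is splatted into it. The accepted patch at the end of this record dispatches on the model class instead, but a generic defensive alternative is to filter the batch against the callable's signature; the sketch below is illustrative only and is not the fix adopted here.

```python
import inspect
from typing import Any, Callable, Dict


def filter_kwargs(fn: Callable, batch: Dict[str, Any]) -> Dict[str, Any]:
    """Keep only the batch entries that `fn` actually accepts.

    If `fn` declares **kwargs, the batch is passed through unchanged.
    """
    params = inspect.signature(fn).parameters
    if any(p.kind is inspect.Parameter.VAR_KEYWORD for p in params.values()):
        return dict(batch)
    return {k: v for k, v in batch.items() if k in params}


if __name__ == "__main__":
    def forward(query_input_ids=None, passage_input_ids=None):
        return query_input_ids, passage_input_ids

    batch = {"query_input_ids": 1, "passage_input_ids": 2, "label_ids": 3}
    print(forward(**filter_kwargs(forward, batch)))  # (1, 2) -- 'label_ids' is dropped
```
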
<code>
[start of haystack/modeling/evaluation/eval.py]
1 from typing import Dict, List, Optional, Any
2
3 import logging
4 import numbers
5 import torch
6 import numpy as np
7 from tqdm import tqdm
8
9 from haystack.modeling.evaluation.metrics import compute_metrics, compute_report_metrics
10 from haystack.modeling.model.adaptive_model import AdaptiveModel
11 from haystack.utils.experiment_tracking import Tracker as tracker
12 from haystack.modeling.visual import BUSH_SEP
13
14
15 logger = logging.getLogger(__name__)
16
17
18 class Evaluator:
19 """
20 Handles evaluation of a given model over a specified dataset.
21 """
22
23 def __init__(self, data_loader: torch.utils.data.DataLoader, tasks, device: torch.device, report: bool = True):
24 """
25 :param data_loader: The PyTorch DataLoader that will return batches of data from the evaluation dataset
26 :param tesks:
27 :param device: The device on which the tensors should be processed. Choose from torch.device("cpu") and torch.device("cuda").
28 :param report: Whether an eval report should be generated (e.g. classification report per class).
29 """
30 self.data_loader = data_loader
31 self.tasks = tasks
32 self.device = device
33 self.report = report
34
35 def eval(
36 self,
37 model: AdaptiveModel,
38 return_preds_and_labels: bool = False,
39 calibrate_conf_scores: bool = False,
40 use_confidence_scores_for_ranking=True,
41 use_no_answer_legacy_confidence=False,
42 ) -> List[Dict]:
43 """
44 Performs evaluation on a given model.
45
46 :param model: The model on which to perform evaluation
47 :param return_preds_and_labels: Whether to add preds and labels in the returned dicts of the
48 :param calibrate_conf_scores: Whether to calibrate the temperature for temperature scaling of the confidence scores
49 :param use_confidence_scores_for_ranking: Whether to sort answers by confidence score (normalized between 0 and 1)(default) or by standard score (unbounded).
50 :param use_no_answer_legacy_confidence: Whether to use the legacy confidence definition for no_answer: difference between the best overall answer confidence and the no_answer gap confidence.
51 Otherwise we use the no_answer score normalized to a range of [0,1] by an expit function (default).
52 :return: all_results: A list of dictionaries, one for each prediction head. Each dictionary contains the metrics
53 and reports generated during evaluation.
54 """
55 model.prediction_heads[0].use_confidence_scores_for_ranking = use_confidence_scores_for_ranking
56 model.prediction_heads[0].use_no_answer_legacy_confidence = use_no_answer_legacy_confidence
57 model.eval()
58
59 # init empty lists per prediction head
60 loss_all: List = [0 for _ in model.prediction_heads]
61 preds_all: List = [[] for _ in model.prediction_heads]
62 label_all: List = [[] for _ in model.prediction_heads]
63 ids_all: List = [[] for _ in model.prediction_heads]
64 passage_start_t_all: List = [[] for _ in model.prediction_heads]
65 logits_all: List = [[] for _ in model.prediction_heads]
66
67 for step, batch in enumerate(tqdm(self.data_loader, desc="Evaluating", mininterval=10)):
68 batch = {key: batch[key].to(self.device) for key in batch}
69
70 with torch.no_grad():
71
72 logits = model.forward(
73 input_ids=batch.get("input_ids", None),
74 segment_ids=batch.get("segment_ids", None),
75 padding_mask=batch.get("padding_mask", None),
76 output_hidden_states=batch.get("output_hidden_states", False),
77 output_attentions=batch.get("output_attentions", False),
78 )
79 losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)
80 preds = model.logits_to_preds(logits=logits, **batch)
81 labels = model.prepare_labels(**batch)
82
83 # stack results of all batches per prediction head
84 for head_num, head in enumerate(model.prediction_heads):
85 loss_all[head_num] += np.sum(_to_numpy(losses_per_head[head_num]))
86 preds_all[head_num] += list(_to_numpy(preds[head_num]))
87 label_all[head_num] += list(_to_numpy(labels[head_num]))
88 if head.model_type == "span_classification":
89 ids_all[head_num] += list(_to_numpy(batch["id"]))
90 passage_start_t_all[head_num] += list(_to_numpy(batch["passage_start_t"]))
91 if calibrate_conf_scores:
92 logits_all[head_num] += list(_to_numpy(logits))
93
94 # Evaluate per prediction head
95 all_results = []
96 for head_num, head in enumerate(model.prediction_heads):
97 if head.model_type == "span_classification" and calibrate_conf_scores:
98 temperature_previous = head.temperature_for_confidence.item()
99 logger.info(f"temperature used for confidence scores before calibration: {temperature_previous}")
100 head.calibrate_conf(logits_all[head_num], label_all[head_num])
101 temperature_current = head.temperature_for_confidence.item()
102 logger.info(f"temperature used for confidence scores after calibration: {temperature_current}")
103 temperature_change = (abs(temperature_current - temperature_previous) / temperature_previous) * 100.0
104 if temperature_change > 50:
105 logger.warning(
106 f"temperature used for calibration of confidence scores changed by more than {temperature_change} percent"
107 )
108 if hasattr(head, "aggregate_preds"):
109 # Needed to convert NQ ids from np arrays to strings
110 ids_all_str = [x.astype(str) for x in ids_all[head_num]]
111 ids_all_list = [list(x) for x in ids_all_str]
112 head_ids = ["-".join(x) for x in ids_all_list]
113 preds_all[head_num], label_all[head_num] = head.aggregate_preds(
114 preds=preds_all[head_num],
115 labels=label_all[head_num],
116 passage_start_t=passage_start_t_all[head_num],
117 ids=head_ids,
118 )
119 result = {"loss": loss_all[head_num] / len(self.data_loader.dataset), "task_name": head.task_name}
120 result.update(compute_metrics(metric=head.metric, preds=preds_all[head_num], labels=label_all[head_num]))
121 # Select type of report depending on prediction head output type
122 if self.report:
123 try:
124 result["report"] = compute_report_metrics(head, preds_all[head_num], label_all[head_num])
125 except:
126 logger.error(
127 f"Couldn't create eval report for head {head_num} with following preds and labels:"
128 f"\n Preds: {preds_all[head_num]} \n Labels: {label_all[head_num]}"
129 )
130 result["report"] = "Error"
131
132 if return_preds_and_labels:
133 result["preds"] = preds_all[head_num]
134 result["labels"] = label_all[head_num]
135
136 all_results.append(result)
137
138 return all_results
139
140 @staticmethod
141 def log_results(
142 results: List[Any],
143 dataset_name: str,
144 steps: int,
145 logging: bool = True,
146 print: bool = True,
147 num_fold: Optional[int] = None,
148 ):
149 # Print a header
150 header = "\n\n"
151 header += BUSH_SEP + "\n"
152 header += "***************************************************\n"
153 if num_fold:
154 header += (
155 f"***** EVALUATION | FOLD: {num_fold} | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\n"
156 )
157 else:
158 header += f"***** EVALUATION | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\n"
159 header += "***************************************************\n"
160 header += BUSH_SEP + "\n"
161 logger.info(header)
162
163 for head_num, head in enumerate(results):
164 logger.info("\n _________ {} _________".format(head["task_name"]))
165 for metric_name, metric_val in head.items():
166 # log with experiment tracking framework (e.g. Mlflow)
167 if logging:
168 if not metric_name in ["preds", "labels"] and not metric_name.startswith("_"):
169 if isinstance(metric_val, numbers.Number):
170 tracker.track_metrics(
171 metrics={f"{dataset_name}_{metric_name}_{head['task_name']}": metric_val}, step=steps
172 )
173 # print via standard python logger
174 if print:
175 if metric_name == "report":
176 if isinstance(metric_val, str) and len(metric_val) > 8000:
177 metric_val = metric_val[:7500] + "\n ............................. \n" + metric_val[-500:]
178 logger.info("{}: \n {}".format(metric_name, metric_val))
179 else:
180 if not metric_name in ["preds", "labels"] and not metric_name.startswith("_"):
181 logger.info("{}: {}".format(metric_name, metric_val))
182
183
184 def _to_numpy(container):
185 try:
186 return container.cpu().numpy()
187 except AttributeError:
188 return container
189
[end of haystack/modeling/evaluation/eval.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/haystack/modeling/evaluation/eval.py b/haystack/modeling/evaluation/eval.py
--- a/haystack/modeling/evaluation/eval.py
+++ b/haystack/modeling/evaluation/eval.py
@@ -8,6 +8,7 @@
from haystack.modeling.evaluation.metrics import compute_metrics, compute_report_metrics
from haystack.modeling.model.adaptive_model import AdaptiveModel
+from haystack.modeling.model.biadaptive_model import BiAdaptiveModel
from haystack.utils.experiment_tracking import Tracker as tracker
from haystack.modeling.visual import BUSH_SEP
@@ -69,13 +70,26 @@
with torch.no_grad():
- logits = model.forward(
- input_ids=batch.get("input_ids", None),
- segment_ids=batch.get("segment_ids", None),
- padding_mask=batch.get("padding_mask", None),
- output_hidden_states=batch.get("output_hidden_states", False),
- output_attentions=batch.get("output_attentions", False),
- )
+ if isinstance(model, AdaptiveModel):
+ logits = model.forward(
+ input_ids=batch.get("input_ids", None),
+ segment_ids=batch.get("segment_ids", None),
+ padding_mask=batch.get("padding_mask", None),
+ output_hidden_states=batch.get("output_hidden_states", False),
+ output_attentions=batch.get("output_attentions", False),
+ )
+ elif isinstance(model, BiAdaptiveModel):
+ logits = model.forward(
+ query_input_ids=batch.get("query_input_ids", None),
+ query_segment_ids=batch.get("query_segment_ids", None),
+ query_attention_mask=batch.get("query_attention_mask", None),
+ passage_input_ids=batch.get("passage_input_ids", None),
+ passage_segment_ids=batch.get("passage_segment_ids", None),
+ passage_attention_mask=batch.get("passage_attention_mask", None),
+ )
+ else:
+ logits = model.forward(**batch)
+
losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)
preds = model.logits_to_preds(logits=logits, **batch)
labels = model.prepare_labels(**batch)
| {"golden_diff": "diff --git a/haystack/modeling/evaluation/eval.py b/haystack/modeling/evaluation/eval.py\n--- a/haystack/modeling/evaluation/eval.py\n+++ b/haystack/modeling/evaluation/eval.py\n@@ -8,6 +8,7 @@\n \n from haystack.modeling.evaluation.metrics import compute_metrics, compute_report_metrics\n from haystack.modeling.model.adaptive_model import AdaptiveModel\n+from haystack.modeling.model.biadaptive_model import BiAdaptiveModel\n from haystack.utils.experiment_tracking import Tracker as tracker\n from haystack.modeling.visual import BUSH_SEP\n \n@@ -69,13 +70,26 @@\n \n with torch.no_grad():\n \n- logits = model.forward(\n- input_ids=batch.get(\"input_ids\", None),\n- segment_ids=batch.get(\"segment_ids\", None),\n- padding_mask=batch.get(\"padding_mask\", None),\n- output_hidden_states=batch.get(\"output_hidden_states\", False),\n- output_attentions=batch.get(\"output_attentions\", False),\n- )\n+ if isinstance(model, AdaptiveModel):\n+ logits = model.forward(\n+ input_ids=batch.get(\"input_ids\", None),\n+ segment_ids=batch.get(\"segment_ids\", None),\n+ padding_mask=batch.get(\"padding_mask\", None),\n+ output_hidden_states=batch.get(\"output_hidden_states\", False),\n+ output_attentions=batch.get(\"output_attentions\", False),\n+ )\n+ elif isinstance(model, BiAdaptiveModel):\n+ logits = model.forward(\n+ query_input_ids=batch.get(\"query_input_ids\", None),\n+ query_segment_ids=batch.get(\"query_segment_ids\", None),\n+ query_attention_mask=batch.get(\"query_attention_mask\", None),\n+ passage_input_ids=batch.get(\"passage_input_ids\", None),\n+ passage_segment_ids=batch.get(\"passage_segment_ids\", None),\n+ passage_attention_mask=batch.get(\"passage_attention_mask\", None),\n+ )\n+ else:\n+ logits = model.forward(**batch)\n+\n losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)\n preds = model.logits_to_preds(logits=logits, **batch)\n labels = model.prepare_labels(**batch)\n", "issue": "DPR training is broken\n**Describe the bug**\r\nIt seems that DPR training does not work at the moment. 
I suspect that this bug was introduced by #2703.\r\n\r\n**Error message**\r\n```\r\n File \"/Users/bogdan/Repositories/haystack/tutorials/Tutorial9_DPR_training.py\", line 92, in <module>\r\n tutorial9_dpr_training()\r\n File \"/Users/bogdan/Repositories/haystack/tutorials/Tutorial9_DPR_training.py\", line 71, in tutorial9_dpr_training\r\n retriever.train(\r\n File \"/Users/bogdan/Repositories/haystack/haystack/nodes/retriever/dense.py\", line 680, in train\r\n trainer.train()\r\n File \"/Users/bogdan/Repositories/haystack/haystack/modeling/training/base.py\", line 290, in train\r\n loss = self.compute_loss(batch, step)\r\n File \"/Users/bogdan/Repositories/haystack/haystack/modeling/training/base.py\", line 374, in compute_loss\r\n logits = self.model.forward(**batch)\r\nTypeError: forward() got an unexpected keyword argument 'label_ids'\r\n```\r\n\r\n**To Reproduce**\r\nExecute Tutorial 9.\r\n\r\n\n", "before_files": [{"content": "from typing import Dict, List, Optional, Any\n\nimport logging\nimport numbers\nimport torch\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom haystack.modeling.evaluation.metrics import compute_metrics, compute_report_metrics\nfrom haystack.modeling.model.adaptive_model import AdaptiveModel\nfrom haystack.utils.experiment_tracking import Tracker as tracker\nfrom haystack.modeling.visual import BUSH_SEP\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Evaluator:\n \"\"\"\n Handles evaluation of a given model over a specified dataset.\n \"\"\"\n\n def __init__(self, data_loader: torch.utils.data.DataLoader, tasks, device: torch.device, report: bool = True):\n \"\"\"\n :param data_loader: The PyTorch DataLoader that will return batches of data from the evaluation dataset\n :param tesks:\n :param device: The device on which the tensors should be processed. Choose from torch.device(\"cpu\") and torch.device(\"cuda\").\n :param report: Whether an eval report should be generated (e.g. classification report per class).\n \"\"\"\n self.data_loader = data_loader\n self.tasks = tasks\n self.device = device\n self.report = report\n\n def eval(\n self,\n model: AdaptiveModel,\n return_preds_and_labels: bool = False,\n calibrate_conf_scores: bool = False,\n use_confidence_scores_for_ranking=True,\n use_no_answer_legacy_confidence=False,\n ) -> List[Dict]:\n \"\"\"\n Performs evaluation on a given model.\n\n :param model: The model on which to perform evaluation\n :param return_preds_and_labels: Whether to add preds and labels in the returned dicts of the\n :param calibrate_conf_scores: Whether to calibrate the temperature for temperature scaling of the confidence scores\n :param use_confidence_scores_for_ranking: Whether to sort answers by confidence score (normalized between 0 and 1)(default) or by standard score (unbounded).\n :param use_no_answer_legacy_confidence: Whether to use the legacy confidence definition for no_answer: difference between the best overall answer confidence and the no_answer gap confidence.\n Otherwise we use the no_answer score normalized to a range of [0,1] by an expit function (default).\n :return: all_results: A list of dictionaries, one for each prediction head. 
Each dictionary contains the metrics\n and reports generated during evaluation.\n \"\"\"\n model.prediction_heads[0].use_confidence_scores_for_ranking = use_confidence_scores_for_ranking\n model.prediction_heads[0].use_no_answer_legacy_confidence = use_no_answer_legacy_confidence\n model.eval()\n\n # init empty lists per prediction head\n loss_all: List = [0 for _ in model.prediction_heads]\n preds_all: List = [[] for _ in model.prediction_heads]\n label_all: List = [[] for _ in model.prediction_heads]\n ids_all: List = [[] for _ in model.prediction_heads]\n passage_start_t_all: List = [[] for _ in model.prediction_heads]\n logits_all: List = [[] for _ in model.prediction_heads]\n\n for step, batch in enumerate(tqdm(self.data_loader, desc=\"Evaluating\", mininterval=10)):\n batch = {key: batch[key].to(self.device) for key in batch}\n\n with torch.no_grad():\n\n logits = model.forward(\n input_ids=batch.get(\"input_ids\", None),\n segment_ids=batch.get(\"segment_ids\", None),\n padding_mask=batch.get(\"padding_mask\", None),\n output_hidden_states=batch.get(\"output_hidden_states\", False),\n output_attentions=batch.get(\"output_attentions\", False),\n )\n losses_per_head = model.logits_to_loss_per_head(logits=logits, **batch)\n preds = model.logits_to_preds(logits=logits, **batch)\n labels = model.prepare_labels(**batch)\n\n # stack results of all batches per prediction head\n for head_num, head in enumerate(model.prediction_heads):\n loss_all[head_num] += np.sum(_to_numpy(losses_per_head[head_num]))\n preds_all[head_num] += list(_to_numpy(preds[head_num]))\n label_all[head_num] += list(_to_numpy(labels[head_num]))\n if head.model_type == \"span_classification\":\n ids_all[head_num] += list(_to_numpy(batch[\"id\"]))\n passage_start_t_all[head_num] += list(_to_numpy(batch[\"passage_start_t\"]))\n if calibrate_conf_scores:\n logits_all[head_num] += list(_to_numpy(logits))\n\n # Evaluate per prediction head\n all_results = []\n for head_num, head in enumerate(model.prediction_heads):\n if head.model_type == \"span_classification\" and calibrate_conf_scores:\n temperature_previous = head.temperature_for_confidence.item()\n logger.info(f\"temperature used for confidence scores before calibration: {temperature_previous}\")\n head.calibrate_conf(logits_all[head_num], label_all[head_num])\n temperature_current = head.temperature_for_confidence.item()\n logger.info(f\"temperature used for confidence scores after calibration: {temperature_current}\")\n temperature_change = (abs(temperature_current - temperature_previous) / temperature_previous) * 100.0\n if temperature_change > 50:\n logger.warning(\n f\"temperature used for calibration of confidence scores changed by more than {temperature_change} percent\"\n )\n if hasattr(head, \"aggregate_preds\"):\n # Needed to convert NQ ids from np arrays to strings\n ids_all_str = [x.astype(str) for x in ids_all[head_num]]\n ids_all_list = [list(x) for x in ids_all_str]\n head_ids = [\"-\".join(x) for x in ids_all_list]\n preds_all[head_num], label_all[head_num] = head.aggregate_preds(\n preds=preds_all[head_num],\n labels=label_all[head_num],\n passage_start_t=passage_start_t_all[head_num],\n ids=head_ids,\n )\n result = {\"loss\": loss_all[head_num] / len(self.data_loader.dataset), \"task_name\": head.task_name}\n result.update(compute_metrics(metric=head.metric, preds=preds_all[head_num], labels=label_all[head_num]))\n # Select type of report depending on prediction head output type\n if self.report:\n try:\n result[\"report\"] = 
compute_report_metrics(head, preds_all[head_num], label_all[head_num])\n except:\n logger.error(\n f\"Couldn't create eval report for head {head_num} with following preds and labels:\"\n f\"\\n Preds: {preds_all[head_num]} \\n Labels: {label_all[head_num]}\"\n )\n result[\"report\"] = \"Error\"\n\n if return_preds_and_labels:\n result[\"preds\"] = preds_all[head_num]\n result[\"labels\"] = label_all[head_num]\n\n all_results.append(result)\n\n return all_results\n\n @staticmethod\n def log_results(\n results: List[Any],\n dataset_name: str,\n steps: int,\n logging: bool = True,\n print: bool = True,\n num_fold: Optional[int] = None,\n ):\n # Print a header\n header = \"\\n\\n\"\n header += BUSH_SEP + \"\\n\"\n header += \"***************************************************\\n\"\n if num_fold:\n header += (\n f\"***** EVALUATION | FOLD: {num_fold} | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\\n\"\n )\n else:\n header += f\"***** EVALUATION | {dataset_name.upper()} SET | AFTER {steps} BATCHES *****\\n\"\n header += \"***************************************************\\n\"\n header += BUSH_SEP + \"\\n\"\n logger.info(header)\n\n for head_num, head in enumerate(results):\n logger.info(\"\\n _________ {} _________\".format(head[\"task_name\"]))\n for metric_name, metric_val in head.items():\n # log with experiment tracking framework (e.g. Mlflow)\n if logging:\n if not metric_name in [\"preds\", \"labels\"] and not metric_name.startswith(\"_\"):\n if isinstance(metric_val, numbers.Number):\n tracker.track_metrics(\n metrics={f\"{dataset_name}_{metric_name}_{head['task_name']}\": metric_val}, step=steps\n )\n # print via standard python logger\n if print:\n if metric_name == \"report\":\n if isinstance(metric_val, str) and len(metric_val) > 8000:\n metric_val = metric_val[:7500] + \"\\n ............................. \\n\" + metric_val[-500:]\n logger.info(\"{}: \\n {}\".format(metric_name, metric_val))\n else:\n if not metric_name in [\"preds\", \"labels\"] and not metric_name.startswith(\"_\"):\n logger.info(\"{}: {}\".format(metric_name, metric_val))\n\n\ndef _to_numpy(container):\n try:\n return container.cpu().numpy()\n except AttributeError:\n return container\n", "path": "haystack/modeling/evaluation/eval.py"}]} | 3,196 | 470 |
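
The dispatch pattern used in the patch above — inspecting the model type and building only the keyword arguments its `forward()` accepts — is easy to reproduce in isolation. The toy classes below stand in for `AdaptiveModel` and `BiAdaptiveModel`; all names and signatures here are invented for the example.

```python
from typing import Any, Dict


class SingleEncoder:
    """Toy stand-in for a model whose forward() takes flat input tensors."""

    def forward(self, input_ids=None, padding_mask=None):
        return ("single", input_ids)


class BiEncoder:
    """Toy stand-in for a two-tower model with query/passage inputs."""

    def forward(self, query_input_ids=None, passage_input_ids=None):
        return ("bi", query_input_ids, passage_input_ids)


def run_forward(model: Any, batch: Dict[str, Any]):
    """Explicit dispatch keeps extra batch keys (labels, ids) out of forward()."""
    if isinstance(model, SingleEncoder):
        return model.forward(
            input_ids=batch.get("input_ids"),
            padding_mask=batch.get("padding_mask"),
        )
    if isinstance(model, BiEncoder):
        return model.forward(
            query_input_ids=batch.get("query_input_ids"),
            passage_input_ids=batch.get("passage_input_ids"),
        )
    return model.forward(**batch)


if __name__ == "__main__":
    batch = {"input_ids": [1, 2], "query_input_ids": [3], "passage_input_ids": [4], "label_ids": [0]}
    print(run_forward(SingleEncoder(), batch))  # ('single', [1, 2])
    print(run_forward(BiEncoder(), batch))      # ('bi', [3], [4])
```
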
gh_patches_debug_10505 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-1371 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ZeroMean for Batch Independent Multioutput GP
I'm following the Batch Independent Multioutput GP example, but instead of using a constant mean, I would like each dimension to use a zero mean (maybe this is a bad idea?).
```
class ZeroMeanIndependentMultitaskGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood, nx):
super().__init__(train_x, train_y, likelihood)
self.n = nx #output dimension
#self.mean_module = gpytorch.means.MultitaskMean([gpytorch.means.ZeroMean()]*self.n,
# num_tasks=self.n)
#self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([self.n]))
self.mean_module = gpytorch.means.ZeroMean()
self.covar_module = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.RBFKernel(batch_shape=torch.Size([self.n])),
batch_shape=torch.Size([self.n])
)
def forward(self, x):
mean_x = self.mean_module(x) # is this needed for ZeroMean?
covar_x = self.covar_module(x)
return gpytorch.distributions.MultitaskMultivariateNormal.from_batch_mvn(
gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
)
```
When training with this, I get the error `RuntimeError: mean should be a matrix or a batch matrix (batch mode)`. It works as intended with a constant mean. As you can see, I've tried a couple of different things, but they don't seem to work either. I can't find other people with the same issue online. Is it possible to do this with ZeroMean?
</issue>
<code>
[start of gpytorch/means/zero_mean.py]
1 #!/usr/bin/env python3
2
3 import torch
4
5 from .mean import Mean
6
7
8 class ZeroMean(Mean):
9 def forward(self, input):
10 return torch.zeros(input.shape[:-1], dtype=input.dtype, device=input.device)
11
[end of gpytorch/means/zero_mean.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gpytorch/means/zero_mean.py b/gpytorch/means/zero_mean.py
--- a/gpytorch/means/zero_mean.py
+++ b/gpytorch/means/zero_mean.py
@@ -2,9 +2,18 @@
import torch
+from ..utils.broadcasting import _mul_broadcast_shape
from .mean import Mean
class ZeroMean(Mean):
+ def __init__(self, batch_shape=torch.Size(), **kwargs):
+ super(ZeroMean, self).__init__()
+ self.batch_shape = batch_shape
+
def forward(self, input):
- return torch.zeros(input.shape[:-1], dtype=input.dtype, device=input.device)
+ mean = torch.zeros(*self.batch_shape, 1, dtype=input.dtype, device=input.device)
+ if input.shape[:-2] == self.batch_shape:
+ return mean.expand(input.shape[:-1])
+ else:
+ return mean.expand(_mul_broadcast_shape(input.shape[:-1], mean.shape))
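With this change, the batch-independent multioutput model from the report can use a per-task zero mean directly. The sketch below is illustrative only; the data shapes, number of tasks, and likelihood are assumptions rather than details from the original report:

```python
import torch
import gpytorch

num_tasks = 2                          # assumed output dimension
train_x = torch.randn(10, 3)           # assumed 10 points with 3 features
train_y = torch.randn(10, num_tasks)
likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=num_tasks)

class ZeroMeanBatchGP(gpytorch.models.ExactGP):
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        # ZeroMean now accepts batch_shape, matching the kernel's batch dimensions.
        self.mean_module = gpytorch.means.ZeroMean(batch_shape=torch.Size([num_tasks]))
        self.covar_module = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel(batch_shape=torch.Size([num_tasks])),
            batch_shape=torch.Size([num_tasks]),
        )

    def forward(self, x):
        mean_x = self.mean_module(x)    # batch of zero means: num_tasks x N
        covar_x = self.covar_module(x)  # batch of covariances: num_tasks x N x N
        return gpytorch.distributions.MultitaskMultivariateNormal.from_batch_mvn(
            gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
        )

model = ZeroMeanBatchGP(train_x, train_y, likelihood)
```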
| {"golden_diff": "diff --git a/gpytorch/means/zero_mean.py b/gpytorch/means/zero_mean.py\n--- a/gpytorch/means/zero_mean.py\n+++ b/gpytorch/means/zero_mean.py\n@@ -2,9 +2,18 @@\n \n import torch\n \n+from ..utils.broadcasting import _mul_broadcast_shape\n from .mean import Mean\n \n \n class ZeroMean(Mean):\n+ def __init__(self, batch_shape=torch.Size(), **kwargs):\n+ super(ZeroMean, self).__init__()\n+ self.batch_shape = batch_shape\n+\n def forward(self, input):\n- return torch.zeros(input.shape[:-1], dtype=input.dtype, device=input.device)\n+ mean = torch.zeros(*self.batch_shape, 1, dtype=input.dtype, device=input.device)\n+ if input.shape[:-2] == self.batch_shape:\n+ return mean.expand(input.shape[:-1])\n+ else:\n+ return mean.expand(_mul_broadcast_shape(input.shape[:-1], mean.shape))\n", "issue": "ZeroMean for Batch Independent Multioutput GP\nI'm following the Batch Independent Multioutput GP example, but instead of using a constant mean, I would like each dimension to use a zero mean (maybe this is a bad idea?).\r\n\r\n```\r\nclass ZeroMeanIndependentMultitaskGPModel(gpytorch.models.ExactGP):\r\n def __init__(self, train_x, train_y, likelihood, nx):\r\n super().__init__(train_x, train_y, likelihood)\r\n self.n = nx #output dimension\r\n #self.mean_module = gpytorch.means.MultitaskMean([gpytorch.means.ZeroMean()]*self.n,\r\n # num_tasks=self.n)\r\n #self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([self.n]))\r\n self.mean_module = gpytorch.means.ZeroMean()\r\n self.covar_module = gpytorch.kernels.ScaleKernel(\r\n gpytorch.kernels.RBFKernel(batch_shape=torch.Size([self.n])),\r\n batch_shape=torch.Size([self.n])\r\n )\r\n\r\n def forward(self, x):\r\n mean_x = self.mean_module(x) # is this needed for ZeroMean?\r\n covar_x = self.covar_module(x)\r\n return gpytorch.distributions.MultitaskMultivariateNormal.from_batch_mvn(\r\n gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\r\n )\r\n```\r\n\r\n\r\nWhen training with this, I get the error `RuntimeError: mean should be a matrix or a batch matrix (batch mode)`. It works as intended with constant mean. As you can see, I've tried a couple different things, but they don't seem to work either. I can't seem to find other people with the same issue online. Is it possible to do this with ZeroMean?\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport torch\n\nfrom .mean import Mean\n\n\nclass ZeroMean(Mean):\n def forward(self, input):\n return torch.zeros(input.shape[:-1], dtype=input.dtype, device=input.device)\n", "path": "gpytorch/means/zero_mean.py"}]} | 984 | 216 |
gh_patches_debug_1971 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-1994 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Postponed annotation evaluation causes `Annotated` to break
When using postponed annotation evaluation, annotating resolver arguments no longer works:
```python
from __future__ import annotations
import random
from typing import Annotated
import strawberry
@strawberry.type
class Query:
@strawberry.field
def dice_roll(
self,
sides: Annotated[
int,
strawberry.argument(description="Number of sides the die should have."),
] = 6,
) -> int:
return random.randint(1, sides)
strawberry.Schema(query=Query)
```
The example above raises this TypeError:
```
TypeError: Query fields cannot be resolved. Unexpected type 'typing.Annotated[int, <strawberry.arguments.StrawberryArgumentAnnotation object at 0x7fd12e130d00>]'
```
When the first line (`from __future__ import annotations`) is left out, everything works as intended. This will probably also break once Python 3.11 lands, since the behavior will become mandatory then. #1586 refers to a somewhat related issue.
</issue>
<code>
[start of strawberry/auto.py]
1 from __future__ import annotations
2
3 from typing import Any, Optional, Union, cast
4
5 from typing_extensions import Annotated, get_args, get_origin
6
7 from strawberry.type import StrawberryType
8
9 from .annotation import StrawberryAnnotation
10
11
12 class StrawberryAutoMeta(type):
13 """Metaclass for StrawberryAuto.
14
15 This is used to make sure StrawberryAuto is a singleton and also to
16 override the behavior of `isinstance` so that it consider the following
17 cases:
18
19 >> isinstance(StrawberryAuto(), StrawberryAuto)
20 True
21 >> isinstance(StrawberryAnnotation(StrawberryAuto()), StrawberryAuto)
22 True
23 >> isinstance(Annotated[StrawberryAuto(), object()), StrawberryAuto)
24 True
25
26 """
27
28 def __init__(self, *args, **kwargs):
29 self._instance: Optional[StrawberryAuto] = None
30 super().__init__(*args, **kwargs)
31
32 def __call__(cls, *args, **kwargs):
33 if cls._instance is None:
34 cls._instance = super().__call__(*args, **kwargs)
35
36 return cls._instance
37
38 def __instancecheck__(
39 self,
40 instance: Union[StrawberryAuto, StrawberryAnnotation, StrawberryType, type],
41 ):
42 if isinstance(instance, StrawberryAnnotation):
43 resolved = instance.annotation
44 if isinstance(resolved, str):
45 namespace = instance.namespace
46 resolved = namespace and namespace.get(resolved)
47
48 if resolved is not None:
49 instance = cast(type, resolved)
50
51 if instance is auto:
52 return True
53
54 # Support uses of Annotated[auto, something()]
55 if get_origin(instance) is Annotated:
56 args = get_args(instance)
57 if args[0] is Any:
58 return any(isinstance(arg, StrawberryAuto) for arg in args[1:])
59
60 return False
61
62
63 class StrawberryAuto(metaclass=StrawberryAutoMeta):
64 def __str__(self):
65 return "auto"
66
67 def __repr__(self):
68 return "<auto>"
69
70
71 auto = Annotated[Any, StrawberryAuto()]
72
[end of strawberry/auto.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/strawberry/auto.py b/strawberry/auto.py
--- a/strawberry/auto.py
+++ b/strawberry/auto.py
@@ -57,7 +57,7 @@
if args[0] is Any:
return any(isinstance(arg, StrawberryAuto) for arg in args[1:])
- return False
+ return instance == "strawberry.auto"
class StrawberryAuto(metaclass=StrawberryAutoMeta):
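The string comparison works because postponed evaluation (PEP 563) stores annotations as plain strings, so an `auto` annotation is kept as the literal text `"strawberry.auto"` instead of the resolved `Annotated` alias. A self-contained sketch of that behaviour (strawberry does not even need to be importable here, since the annotation is never evaluated):

```python
from __future__ import annotations

class Example:
    # Stored as the string "strawberry.auto", not as the Annotated alias.
    field: strawberry.auto

print(Example.__annotations__["field"])                       # 'strawberry.auto'
print(Example.__annotations__["field"] == "strawberry.auto")  # True
```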
| {"golden_diff": "diff --git a/strawberry/auto.py b/strawberry/auto.py\n--- a/strawberry/auto.py\n+++ b/strawberry/auto.py\n@@ -57,7 +57,7 @@\n if args[0] is Any:\n return any(isinstance(arg, StrawberryAuto) for arg in args[1:])\n \n- return False\n+ return instance == \"strawberry.auto\"\n \n \n class StrawberryAuto(metaclass=StrawberryAutoMeta):\n", "issue": "Postponed annotation evaluation causes `Annotated` to break\nWhen using postponed annotation evaluation, annotating resolver arguments no longer works:\r\n\r\n```python\r\nfrom __future__ import annotations\r\n\r\nimport random\r\nfrom typing import Annotated\r\n\r\nimport strawberry\r\n\r\n\r\[email protected]\r\nclass Query:\r\n @strawberry.field\r\n def dice_roll(\r\n self,\r\n sides: Annotated[\r\n int,\r\n strawberry.argument(description=\"Number of sides the die should have.\"),\r\n ] = 6,\r\n ) -> int:\r\n return random.randint(1, sides)\r\n\r\n\r\nstrawberry.Schema(query=Query)\r\n```\r\n\r\nThe example above raises this TypeError:\r\n\r\n```\r\nTypeError: Query fields cannot be resolved. Unexpected type 'typing.Annotated[int, <strawberry.arguments.StrawberryArgumentAnnotation object at 0x7fd12e130d00>]'\r\n```\r\n\r\nWhen the first line (`from __future__ import annotations`) is left out, everything works as intended. This will probably also break once Python 3.11 lands, since the behavior will become mandatory then. #1586 refers to a somewhat related issue.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any, Optional, Union, cast\n\nfrom typing_extensions import Annotated, get_args, get_origin\n\nfrom strawberry.type import StrawberryType\n\nfrom .annotation import StrawberryAnnotation\n\n\nclass StrawberryAutoMeta(type):\n \"\"\"Metaclass for StrawberryAuto.\n\n This is used to make sure StrawberryAuto is a singleton and also to\n override the behavior of `isinstance` so that it consider the following\n cases:\n\n >> isinstance(StrawberryAuto(), StrawberryAuto)\n True\n >> isinstance(StrawberryAnnotation(StrawberryAuto()), StrawberryAuto)\n True\n >> isinstance(Annotated[StrawberryAuto(), object()), StrawberryAuto)\n True\n\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._instance: Optional[StrawberryAuto] = None\n super().__init__(*args, **kwargs)\n\n def __call__(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance = super().__call__(*args, **kwargs)\n\n return cls._instance\n\n def __instancecheck__(\n self,\n instance: Union[StrawberryAuto, StrawberryAnnotation, StrawberryType, type],\n ):\n if isinstance(instance, StrawberryAnnotation):\n resolved = instance.annotation\n if isinstance(resolved, str):\n namespace = instance.namespace\n resolved = namespace and namespace.get(resolved)\n\n if resolved is not None:\n instance = cast(type, resolved)\n\n if instance is auto:\n return True\n\n # Support uses of Annotated[auto, something()]\n if get_origin(instance) is Annotated:\n args = get_args(instance)\n if args[0] is Any:\n return any(isinstance(arg, StrawberryAuto) for arg in args[1:])\n\n return False\n\n\nclass StrawberryAuto(metaclass=StrawberryAutoMeta):\n def __str__(self):\n return \"auto\"\n\n def __repr__(self):\n return \"<auto>\"\n\n\nauto = Annotated[Any, StrawberryAuto()]\n", "path": "strawberry/auto.py"}]} | 1,348 | 104 |
gh_patches_debug_2541 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-2452 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Site unavailable
### What's wrong
Not sure where exactly to put this, but https://wemake-python-stylegui.de/ is unavailable
</issue>
<code>
[start of wemake_python_styleguide/formatter.py]
1 """
2 Our very own ``flake8`` formatter for better error messages.
3
4 That's how all ``flake8`` formatters work:
5
6 .. mermaid::
7 :caption: ``flake8`` formatting API calls order.
8
9 graph LR
10 F2[start] --> F3[after_init]
11 F3 --> F4[start]
12 F4 --> F5[beginning]
13 F5 --> F6[handle]
14 F6 --> F7[format]
15 F6 --> F8[show_source]
16 F6 --> F9[show_statistic]
17 F7 --> F10[finished]
18 F8 --> F10[finished]
19 F9 --> F10[finished]
20 F10 -.-> F5
21 F10 --> F11[stop]
22
23 .. autoclass:: WemakeFormatter
24 :no-undoc-members:
25
26 """
27
28 from collections import defaultdict
29 from typing import ClassVar, DefaultDict, List
30
31 from flake8.formatting.base import BaseFormatter
32 from flake8.statistics import Statistics
33 from flake8.style_guide import Violation
34 from pygments import highlight
35 from pygments.formatters import TerminalFormatter
36 from pygments.lexers import PythonLexer
37 from typing_extensions import Final
38
39 from wemake_python_styleguide.version import pkg_version
40
41 #: That url is generated and hosted by Sphinx.
42 DOCS_URL_TEMPLATE: Final = (
43 'https://wemake-python-stylegui.de/en/{0}/pages/usage/violations/'
44 )
45
46 #: This url points to the specific violation page.
47 SHORTLINK_TEMPLATE: Final = (
48 'https://pyflak.es/{0}'
49 )
50
51
52 class WemakeFormatter(BaseFormatter): # noqa: WPS214
53 """
54 We need to format our style :term:`violations <violation>` beatifully.
55
56 The default formatter does not allow us to do that.
57 What things do we miss?
58
59 1. Spacing, everything is just mixed up and glued together
60 2. Colors and decoration, some information is easier
61 to gather just with colors or underlined text
62 3. Grouping, we need explicit grouping by filename
63 4. Incomplete and non-informative statistics
64
65 """
66
67 _doc_url: ClassVar[str] = DOCS_URL_TEMPLATE.format(pkg_version)
68
69 # API:
70
71 def after_init(self):
72 """Called after the original ``init`` is used to set extra fields."""
73 self._lexer = PythonLexer()
74 self._formatter = TerminalFormatter()
75
76 # Logic:
77 self._processed_filenames: List[str] = []
78 self._error_count = 0
79
80 def handle(self, error: Violation) -> None: # noqa: WPS110
81 """Processes each :term:`violation` to print it and all related."""
82 if error.filename not in self._processed_filenames:
83 self._print_header(error.filename)
84 self._processed_filenames.append(error.filename)
85
86 line = self.format(error)
87 source = self.show_source(error)
88 link = self._show_link(error)
89
90 self._write(line)
91 if link:
92 self._write(link)
93 if source:
94 self._write(source)
95
96 self._error_count += 1
97
98 def format(self, error: Violation) -> str: # noqa: WPS125
99 """Called to format each individual :term:`violation`."""
100 return '{newline} {row_col:<8} {code:<5} {text}'.format(
101 newline=self.newline if self._should_show_source(error) else '',
102 code=error.code,
103 text=error.text,
104 row_col='{0}:{1}'.format(error.line_number, error.column_number),
105 )
106
107 def show_source(self, error: Violation) -> str:
108 """Called when ``--show-source`` option is provided."""
109 if not self._should_show_source(error):
110 return ''
111
112 formatted_line = error.physical_line.lstrip()
113 adjust = len(error.physical_line) - len(formatted_line)
114
115 code = _highlight(
116 formatted_line,
117 self._lexer,
118 self._formatter,
119 )
120
121 return ' {code} {spacing}^'.format(
122 code=code,
123 spacing=' ' * (error.column_number - 1 - adjust),
124 )
125
126 def show_statistics(self, statistics: Statistics) -> None: # noqa: WPS210
127 """Called when ``--statistic`` option is passed."""
128 all_errors = 0
129 for error_code in statistics.error_codes():
130 stats_for_error_code = statistics.statistics_for(error_code)
131 statistic = next(stats_for_error_code)
132
133 count = statistic.count
134 count += sum(stat.count for stat in stats_for_error_code)
135 all_errors += count
136 error_by_file = _count_per_filename(statistics, error_code)
137
138 self._print_violation_per_file(
139 statistic,
140 error_code,
141 count,
142 error_by_file,
143 )
144
145 self._write(self.newline)
146 self._write(_underline(_bold('All errors: {0}'.format(all_errors))))
147
148 def stop(self) -> None:
149 """Runs once per app when the formatting ends."""
150 if self._error_count:
151 message = '{0}Full list of violations and explanations:{0}{1}'
152 self._write(message.format(self.newline, self._doc_url))
153
154 # Our own methods:
155
156 def _show_link(self, error: Violation) -> str:
157 """Called when ``--show-violation-links`` option is provided."""
158 if not self.options.show_violation_links:
159 return ''
160
161 return ' {spacing}-> {link}'.format(
162 spacing=' ' * 9,
163 link=SHORTLINK_TEMPLATE.format(error.code),
164 )
165
166 def _print_header(self, filename: str) -> None:
167 self._write(
168 '{newline}{filename}'.format(
169 filename=_underline(_bold(filename)),
170 newline=self.newline,
171 ),
172 )
173
174 def _print_violation_per_file(
175 self,
176 statistic: Statistics,
177 error_code: str,
178 count: int,
179 error_by_file: DefaultDict[str, int],
180 ):
181 self._write(
182 '{newline}{error_code}: {message}'.format(
183 newline=self.newline,
184 error_code=_bold(error_code),
185 message=statistic.message,
186 ),
187 )
188 for filename, error_count in error_by_file.items():
189 self._write(
190 ' {error_count:<5} {filename}'.format(
191 error_count=error_count,
192 filename=filename,
193 ),
194 )
195 self._write(_underline('Total: {0}'.format(count)))
196
197 def _should_show_source(self, error: Violation) -> bool:
198 return self.options.show_source and error.physical_line is not None
199
200
201 # Formatting text:
202
203 def _bold(text: str) -> str:
204 r"""
205 Returns bold formatted text.
206
207 >>> _bold('Hello!')
208 '\x1b[1mHello!\x1b[0m'
209
210 """
211 return '\033[1m{0}\033[0m'.format(text)
212
213
214 def _underline(text: str) -> str:
215 r"""
216 Returns underlined formatted text.
217
218 >>> _underline('Hello!')
219 '\x1b[4mHello!\x1b[0m'
220
221 """
222 return '\033[4m{0}\033[0m'.format(text)
223
224
225 def _highlight(source: str, lexer, formatter) -> str:
226 """
227 Highlights source code. Might fail.
228
229 See also:
230 https://github.com/wemake-services/wemake-python-styleguide/issues/794
231
232 """
233 try:
234 return highlight(source, lexer, formatter)
235 except Exception: # pragma: no cover
236 # Might fail on some systems, when colors are set incorrectly,
237 # or not available at all. In this case code will be just text.
238 return source
239
240
241 # Helpers:
242
243 def _count_per_filename(
244 statistics: Statistics,
245 error_code: str,
246 ) -> DefaultDict[str, int]:
247 filenames: DefaultDict[str, int] = defaultdict(int)
248 stats_for_error_code = statistics.statistics_for(error_code)
249
250 for stat in stats_for_error_code:
251 filenames[stat.filename] += stat.count
252
253 return filenames
254
[end of wemake_python_styleguide/formatter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/formatter.py b/wemake_python_styleguide/formatter.py
--- a/wemake_python_styleguide/formatter.py
+++ b/wemake_python_styleguide/formatter.py
@@ -40,7 +40,7 @@
#: That url is generated and hosted by Sphinx.
DOCS_URL_TEMPLATE: Final = (
- 'https://wemake-python-stylegui.de/en/{0}/pages/usage/violations/'
+ 'https://wemake-python-styleguide.rtfd.io/en/{0}/pages/usage/violations/'
)
#: This url points to the specific violation page.
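For reference, a quick sketch of how the updated template expands; the version string here is an assumption:

```python
DOCS_URL_TEMPLATE = 'https://wemake-python-styleguide.rtfd.io/en/{0}/pages/usage/violations/'
print(DOCS_URL_TEMPLATE.format('0.16.1'))
# https://wemake-python-styleguide.rtfd.io/en/0.16.1/pages/usage/violations/
```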
| {"golden_diff": "diff --git a/wemake_python_styleguide/formatter.py b/wemake_python_styleguide/formatter.py\n--- a/wemake_python_styleguide/formatter.py\n+++ b/wemake_python_styleguide/formatter.py\n@@ -40,7 +40,7 @@\n \n #: That url is generated and hosted by Sphinx.\n DOCS_URL_TEMPLATE: Final = (\n- 'https://wemake-python-stylegui.de/en/{0}/pages/usage/violations/'\n+ 'https://wemake-python-styleguide.rtfd.io/en/{0}/pages/usage/violations/'\n )\n \n #: This url points to the specific violation page.\n", "issue": "Site unavailable\n### What's wrong\r\n\r\nNot sure where exactly to put this, but https://wemake-python-stylegui.de/ is unavailable\n", "before_files": [{"content": "\"\"\"\nOur very own ``flake8`` formatter for better error messages.\n\nThat's how all ``flake8`` formatters work:\n\n.. mermaid::\n :caption: ``flake8`` formatting API calls order.\n\n graph LR\n F2[start] --> F3[after_init]\n F3 --> F4[start]\n F4 --> F5[beginning]\n F5 --> F6[handle]\n F6 --> F7[format]\n F6\t --> F8[show_source]\n F6\t --> F9[show_statistic]\n F7 --> F10[finished]\n F8 --> F10[finished]\n F9 --> F10[finished]\n F10 -.-> F5\n F10 --> F11[stop]\n\n.. autoclass:: WemakeFormatter\n :no-undoc-members:\n\n\"\"\"\n\nfrom collections import defaultdict\nfrom typing import ClassVar, DefaultDict, List\n\nfrom flake8.formatting.base import BaseFormatter\nfrom flake8.statistics import Statistics\nfrom flake8.style_guide import Violation\nfrom pygments import highlight\nfrom pygments.formatters import TerminalFormatter\nfrom pygments.lexers import PythonLexer\nfrom typing_extensions import Final\n\nfrom wemake_python_styleguide.version import pkg_version\n\n#: That url is generated and hosted by Sphinx.\nDOCS_URL_TEMPLATE: Final = (\n 'https://wemake-python-stylegui.de/en/{0}/pages/usage/violations/'\n)\n\n#: This url points to the specific violation page.\nSHORTLINK_TEMPLATE: Final = (\n 'https://pyflak.es/{0}'\n)\n\n\nclass WemakeFormatter(BaseFormatter): # noqa: WPS214\n \"\"\"\n We need to format our style :term:`violations <violation>` beatifully.\n\n The default formatter does not allow us to do that.\n What things do we miss?\n\n 1. Spacing, everything is just mixed up and glued together\n 2. Colors and decoration, some information is easier\n to gather just with colors or underlined text\n 3. Grouping, we need explicit grouping by filename\n 4. 
Incomplete and non-informative statistics\n\n \"\"\"\n\n _doc_url: ClassVar[str] = DOCS_URL_TEMPLATE.format(pkg_version)\n\n # API:\n\n def after_init(self):\n \"\"\"Called after the original ``init`` is used to set extra fields.\"\"\"\n self._lexer = PythonLexer()\n self._formatter = TerminalFormatter()\n\n # Logic:\n self._processed_filenames: List[str] = []\n self._error_count = 0\n\n def handle(self, error: Violation) -> None: # noqa: WPS110\n \"\"\"Processes each :term:`violation` to print it and all related.\"\"\"\n if error.filename not in self._processed_filenames:\n self._print_header(error.filename)\n self._processed_filenames.append(error.filename)\n\n line = self.format(error)\n source = self.show_source(error)\n link = self._show_link(error)\n\n self._write(line)\n if link:\n self._write(link)\n if source:\n self._write(source)\n\n self._error_count += 1\n\n def format(self, error: Violation) -> str: # noqa: WPS125\n \"\"\"Called to format each individual :term:`violation`.\"\"\"\n return '{newline} {row_col:<8} {code:<5} {text}'.format(\n newline=self.newline if self._should_show_source(error) else '',\n code=error.code,\n text=error.text,\n row_col='{0}:{1}'.format(error.line_number, error.column_number),\n )\n\n def show_source(self, error: Violation) -> str:\n \"\"\"Called when ``--show-source`` option is provided.\"\"\"\n if not self._should_show_source(error):\n return ''\n\n formatted_line = error.physical_line.lstrip()\n adjust = len(error.physical_line) - len(formatted_line)\n\n code = _highlight(\n formatted_line,\n self._lexer,\n self._formatter,\n )\n\n return ' {code} {spacing}^'.format(\n code=code,\n spacing=' ' * (error.column_number - 1 - adjust),\n )\n\n def show_statistics(self, statistics: Statistics) -> None: # noqa: WPS210\n \"\"\"Called when ``--statistic`` option is passed.\"\"\"\n all_errors = 0\n for error_code in statistics.error_codes():\n stats_for_error_code = statistics.statistics_for(error_code)\n statistic = next(stats_for_error_code)\n\n count = statistic.count\n count += sum(stat.count for stat in stats_for_error_code)\n all_errors += count\n error_by_file = _count_per_filename(statistics, error_code)\n\n self._print_violation_per_file(\n statistic,\n error_code,\n count,\n error_by_file,\n )\n\n self._write(self.newline)\n self._write(_underline(_bold('All errors: {0}'.format(all_errors))))\n\n def stop(self) -> None:\n \"\"\"Runs once per app when the formatting ends.\"\"\"\n if self._error_count:\n message = '{0}Full list of violations and explanations:{0}{1}'\n self._write(message.format(self.newline, self._doc_url))\n\n # Our own methods:\n\n def _show_link(self, error: Violation) -> str:\n \"\"\"Called when ``--show-violation-links`` option is provided.\"\"\"\n if not self.options.show_violation_links:\n return ''\n\n return ' {spacing}-> {link}'.format(\n spacing=' ' * 9,\n link=SHORTLINK_TEMPLATE.format(error.code),\n )\n\n def _print_header(self, filename: str) -> None:\n self._write(\n '{newline}{filename}'.format(\n filename=_underline(_bold(filename)),\n newline=self.newline,\n ),\n )\n\n def _print_violation_per_file(\n self,\n statistic: Statistics,\n error_code: str,\n count: int,\n error_by_file: DefaultDict[str, int],\n ):\n self._write(\n '{newline}{error_code}: {message}'.format(\n newline=self.newline,\n error_code=_bold(error_code),\n message=statistic.message,\n ),\n )\n for filename, error_count in error_by_file.items():\n self._write(\n ' {error_count:<5} {filename}'.format(\n error_count=error_count,\n 
filename=filename,\n ),\n )\n self._write(_underline('Total: {0}'.format(count)))\n\n def _should_show_source(self, error: Violation) -> bool:\n return self.options.show_source and error.physical_line is not None\n\n\n# Formatting text:\n\ndef _bold(text: str) -> str:\n r\"\"\"\n Returns bold formatted text.\n\n >>> _bold('Hello!')\n '\\x1b[1mHello!\\x1b[0m'\n\n \"\"\"\n return '\\033[1m{0}\\033[0m'.format(text)\n\n\ndef _underline(text: str) -> str:\n r\"\"\"\n Returns underlined formatted text.\n\n >>> _underline('Hello!')\n '\\x1b[4mHello!\\x1b[0m'\n\n \"\"\"\n return '\\033[4m{0}\\033[0m'.format(text)\n\n\ndef _highlight(source: str, lexer, formatter) -> str:\n \"\"\"\n Highlights source code. Might fail.\n\n See also:\n https://github.com/wemake-services/wemake-python-styleguide/issues/794\n\n \"\"\"\n try:\n return highlight(source, lexer, formatter)\n except Exception: # pragma: no cover\n # Might fail on some systems, when colors are set incorrectly,\n # or not available at all. In this case code will be just text.\n return source\n\n\n# Helpers:\n\ndef _count_per_filename(\n statistics: Statistics,\n error_code: str,\n) -> DefaultDict[str, int]:\n filenames: DefaultDict[str, int] = defaultdict(int)\n stats_for_error_code = statistics.statistics_for(error_code)\n\n for stat in stats_for_error_code:\n filenames[stat.filename] += stat.count\n\n return filenames\n", "path": "wemake_python_styleguide/formatter.py"}]} | 3,061 | 139 |
gh_patches_debug_36330 | rasdani/github-patches | git_diff | weni-ai__bothub-engine-166 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix health checker /ping/ - infinite looping
Improve check_database_connection function
We can improve this code like that:
```python
def check_database_connection(**kwargs):
for conn in connections.all():
try:
conn.cursor()
return True
except OperationalError:
return False
return False
```
reported by @eltonplima in #158
</issue>
<code>
[start of bothub/health/checks.py]
1 def check_database_connection(**kwargs):
2 from django.db import connections
3 from django.db.utils import OperationalError
4 db_conn = connections['default']
5 if not db_conn:
6 return False
7 try:
8 db_conn.cursor()
9 return True
10 except OperationalError as e:
11 return False
12
13
14 def check_accessible_api(request, **kwargs):
15 import requests
16 HTTP_HOST = request.META.get('HTTP_HOST')
17 repositories_url = 'http://{}/api/repositories/'.format(HTTP_HOST)
18 request = requests.get(repositories_url)
19 try:
20 request.raise_for_status()
21 return True
22 except requests.HTTPError as e:
23 return False
24
[end of bothub/health/checks.py]
[start of bothub/settings.py]
1 import os
2 import dj_database_url
3
4 from decouple import config
5
6
7 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
8 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
9
10
11 # SECURITY WARNING: keep the secret key used in production secret!
12 SECRET_KEY = config('SECRET_KEY')
13
14 # SECURITY WARNING: don't run with debug turned on in production!
15 DEBUG = config('DEBUG', default=False, cast=bool)
16
17 ALLOWED_HOSTS = config(
18 'ALLOWED_HOSTS',
19 default='*',
20 cast=lambda v: [s.strip() for s in v.split(',')])
21
22
23 # Application definition
24
25 INSTALLED_APPS = [
26 'django.contrib.admin',
27 'django.contrib.auth',
28 'django.contrib.contenttypes',
29 'django.contrib.sessions',
30 'django.contrib.messages',
31 'django.contrib.staticfiles',
32 'rest_framework',
33 'rest_framework.authtoken',
34 'django_filters',
35 'corsheaders',
36 'bothub.authentication',
37 'bothub.common',
38 'bothub.api',
39 ]
40
41 MIDDLEWARE = [
42 'django.middleware.security.SecurityMiddleware',
43 'whitenoise.middleware.WhiteNoiseMiddleware',
44 'django.contrib.sessions.middleware.SessionMiddleware',
45 'corsheaders.middleware.CorsMiddleware',
46 'django.middleware.common.CommonMiddleware',
47 'django.middleware.csrf.CsrfViewMiddleware',
48 'django.contrib.auth.middleware.AuthenticationMiddleware',
49 'django.contrib.messages.middleware.MessageMiddleware',
50 'django.middleware.clickjacking.XFrameOptionsMiddleware',
51 ]
52
53 ROOT_URLCONF = 'bothub.urls'
54
55 TEMPLATES = [
56 {
57 'BACKEND': 'django.template.backends.django.DjangoTemplates',
58 'DIRS': [],
59 'APP_DIRS': True,
60 'OPTIONS': {
61 'context_processors': [
62 'django.template.context_processors.debug',
63 'django.template.context_processors.request',
64 'django.contrib.auth.context_processors.auth',
65 'django.contrib.messages.context_processors.messages',
66 ],
67 },
68 },
69 ]
70
71 WSGI_APPLICATION = 'bothub.wsgi.application'
72
73
74 # Database
75
76 DATABASES = {}
77 DATABASES['default'] = dj_database_url.parse(
78 config(
79 'DEFAULT_DATABASE',
80 default='sqlite:///db.sqlite3'))
81
82
83 # Auth
84
85 AUTH_USER_MODEL = 'authentication.User'
86
87
88 # Password validation
89
90 AUTH_PASSWORD_VALIDATORS = [
91 {
92 'NAME': 'django.contrib.auth.password_validation.' +
93 'UserAttributeSimilarityValidator',
94 },
95 {
96 'NAME': 'django.contrib.auth.password_validation.' +
97 'MinimumLengthValidator',
98 },
99 {
100 'NAME': 'django.contrib.auth.password_validation.' +
101 'CommonPasswordValidator',
102 },
103 {
104 'NAME': 'django.contrib.auth.password_validation.' +
105 'NumericPasswordValidator',
106 },
107 ]
108
109
110 # Internationalization
111
112 LANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')
113
114 TIME_ZONE = config('TIME_ZONE', default='UTC')
115
116 USE_I18N = True
117
118 USE_L10N = True
119
120 USE_TZ = True
121
122
123 # Static files (CSS, JavaScript, Images)
124
125 STATIC_URL = config('STATIC_URL', default='/static/')
126
127 STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
128
129 STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
130
131
132 # rest framework
133
134 REST_FRAMEWORK = {
135 'DEFAULT_AUTHENTICATION_CLASSES': [
136 'rest_framework.authentication.TokenAuthentication',
137 ],
138 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +
139 'LimitOffsetPagination',
140 'PAGE_SIZE': 20,
141 'DEFAULT_FILTER_BACKENDS': [
142 'django_filters.rest_framework.DjangoFilterBackend',
143 ],
144 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',
145 }
146
147
148 # cors headers
149
150 CORS_ORIGIN_ALLOW_ALL = True
151 CORS_URLS_REGEX = r'^/api/.*$'
152
153
154 # mail
155
156 envvar_EMAIL_HOST = config('EMAIL_HOST', default=None)
157
158 ADMINS = config(
159 'ADMINS',
160 default='',
161 cast=lambda v: [
162 (
163 s.strip().split('|')[0],
164 s.strip().split('|')[1],
165 ) for s in v.split(',')] if v else [])
166 EMAIL_SUBJECT_PREFIX = '[bothub] '
167 DEFAULT_FROM_EMAIL = config(
168 'DEFAULT_FROM_EMAIL',
169 default='webmaster@localhost')
170 SERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')
171
172 if envvar_EMAIL_HOST:
173 EMAIL_HOST = envvar_EMAIL_HOST
174 EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
175 EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
176 EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
177 EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)
178 EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
179 else:
180 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
181
182
183 # webapp
184
185 BOTHUB_WEBAPP_BASE_URL = config(
186 'BOTHUB_WEBAPP_BASE_URL',
187 default='http://localhost:8080/')
188
189
190 # NLP
191
192 BOTHUB_NLP_BASE_URL = config(
193 'BOTHUB_NLP_BASE_URL',
194 default='http://localhost:8001/')
195
196
197 # CSRF
198
199 CSRF_COOKIE_DOMAIN = config(
200 'CSRF_COOKIE_DOMAIN',
201 default=None)
202
203 CSRF_COOKIE_SECURE = config(
204 'CSRF_COOKIE_SECURE',
205 default=False,
206 cast=bool)
207
[end of bothub/settings.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bothub/health/checks.py b/bothub/health/checks.py
--- a/bothub/health/checks.py
+++ b/bothub/health/checks.py
@@ -1,23 +1,37 @@
+import logging
+
+from rest_framework import status
+
+
+logger = logging.getLogger('bothub.health.checks')
+
+CHECK_ACCESSIBLE_API_URL = '/api/repositories/'
+
+
def check_database_connection(**kwargs):
from django.db import connections
from django.db.utils import OperationalError
- db_conn = connections['default']
- if not db_conn:
- return False
- try:
- db_conn.cursor()
- return True
- except OperationalError as e:
+ if len(connections.all()) is 0:
return False
+ logger.info('found {} database connection'.format(len(connections.all())))
+ for i, conn in enumerate(connections.all(), 1):
+ try:
+ conn.cursor()
+ logger.info('#{} db connection OKAY'.format(i))
+ except OperationalError as e:
+ logger.warning('#{} db connection ERROR'.format(i))
+ return False
+ return True
def check_accessible_api(request, **kwargs):
- import requests
- HTTP_HOST = request.META.get('HTTP_HOST')
- repositories_url = 'http://{}/api/repositories/'.format(HTTP_HOST)
- request = requests.get(repositories_url)
- try:
- request.raise_for_status()
+ from django.test import Client
+ logger.info('making request to {}'.format(CHECK_ACCESSIBLE_API_URL))
+ client = Client()
+ response = client.get(CHECK_ACCESSIBLE_API_URL)
+ logger.info('{} status code: {}'.format(
+ CHECK_ACCESSIBLE_API_URL,
+ response.status_code))
+ if response.status_code is status.HTTP_200_OK:
return True
- except requests.HTTPError as e:
- return False
+ return False
diff --git a/bothub/settings.py b/bothub/settings.py
--- a/bothub/settings.py
+++ b/bothub/settings.py
@@ -2,6 +2,7 @@
import dj_database_url
from decouple import config
+from django.utils.log import DEFAULT_LOGGING
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
@@ -191,7 +192,7 @@
BOTHUB_NLP_BASE_URL = config(
'BOTHUB_NLP_BASE_URL',
- default='http://localhost:8001/')
+ default='http://localhost:2657/')
# CSRF
@@ -204,3 +205,21 @@
'CSRF_COOKIE_SECURE',
default=False,
cast=bool)
+
+
+# Logging
+
+LOGGING = DEFAULT_LOGGING
+LOGGING['formatters']['bothub.health'] = {
+ 'format': '[bothub.health] {message}',
+ 'style': '{',
+}
+LOGGING['handlers']['bothub.health'] = {
+ 'level': 'DEBUG',
+ 'class': 'logging.StreamHandler',
+ 'formatter': 'bothub.health',
+}
+LOGGING['loggers']['bothub.health.checks'] = {
+ 'handlers': ['bothub.health'],
+ 'level': 'DEBUG',
+}
| {"golden_diff": "diff --git a/bothub/health/checks.py b/bothub/health/checks.py\n--- a/bothub/health/checks.py\n+++ b/bothub/health/checks.py\n@@ -1,23 +1,37 @@\n+import logging\n+\n+from rest_framework import status\n+\n+\n+logger = logging.getLogger('bothub.health.checks')\n+\n+CHECK_ACCESSIBLE_API_URL = '/api/repositories/'\n+\n+\n def check_database_connection(**kwargs):\n from django.db import connections\n from django.db.utils import OperationalError\n- db_conn = connections['default']\n- if not db_conn:\n- return False\n- try:\n- db_conn.cursor()\n- return True\n- except OperationalError as e:\n+ if len(connections.all()) is 0:\n return False\n+ logger.info('found {} database connection'.format(len(connections.all())))\n+ for i, conn in enumerate(connections.all(), 1):\n+ try:\n+ conn.cursor()\n+ logger.info('#{} db connection OKAY'.format(i))\n+ except OperationalError as e:\n+ logger.warning('#{} db connection ERROR'.format(i))\n+ return False\n+ return True\n \n \n def check_accessible_api(request, **kwargs):\n- import requests\n- HTTP_HOST = request.META.get('HTTP_HOST')\n- repositories_url = 'http://{}/api/repositories/'.format(HTTP_HOST)\n- request = requests.get(repositories_url)\n- try:\n- request.raise_for_status()\n+ from django.test import Client\n+ logger.info('making request to {}'.format(CHECK_ACCESSIBLE_API_URL))\n+ client = Client()\n+ response = client.get(CHECK_ACCESSIBLE_API_URL)\n+ logger.info('{} status code: {}'.format(\n+ CHECK_ACCESSIBLE_API_URL,\n+ response.status_code))\n+ if response.status_code is status.HTTP_200_OK:\n return True\n- except requests.HTTPError as e:\n- return False\n+ return False\ndiff --git a/bothub/settings.py b/bothub/settings.py\n--- a/bothub/settings.py\n+++ b/bothub/settings.py\n@@ -2,6 +2,7 @@\n import dj_database_url\n \n from decouple import config\n+from django.utils.log import DEFAULT_LOGGING\n \n \n # Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n@@ -191,7 +192,7 @@\n \n BOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n- default='http://localhost:8001/')\n+ default='http://localhost:2657/')\n \n \n # CSRF\n@@ -204,3 +205,21 @@\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n+\n+\n+# Logging\n+\n+LOGGING = DEFAULT_LOGGING\n+LOGGING['formatters']['bothub.health'] = {\n+ 'format': '[bothub.health] {message}',\n+ 'style': '{',\n+}\n+LOGGING['handlers']['bothub.health'] = {\n+ 'level': 'DEBUG',\n+ 'class': 'logging.StreamHandler',\n+ 'formatter': 'bothub.health',\n+}\n+LOGGING['loggers']['bothub.health.checks'] = {\n+ 'handlers': ['bothub.health'],\n+ 'level': 'DEBUG',\n+}\n", "issue": "Fix health checker /ping/ - infinite looping\n\nImprove check_database_connection function\nWe can improve this code like that:\r\n\r\n```python\r\ndef check_database_connection(**kwargs):\r\n for conn in connections.all():\r\n try:\r\n conn.cursor()\r\n return True\r\n except OperationalError:\r\n return False\r\n return False\r\n```\r\n\r\nreported by @eltonplima in #158 \nImprove check_database_connection function\nWe can improve this code like that:\r\n\r\n```python\r\ndef check_database_connection(**kwargs):\r\n for conn in connections.all():\r\n try:\r\n conn.cursor()\r\n return True\r\n except OperationalError:\r\n return False\r\n return False\r\n```\r\n\r\nreported by @eltonplima in #158 \n", "before_files": [{"content": "def check_database_connection(**kwargs):\n from django.db import connections\n from django.db.utils import OperationalError\n db_conn = connections['default']\n if not db_conn:\n 
return False\n try:\n db_conn.cursor()\n return True\n except OperationalError as e:\n return False\n\n\ndef check_accessible_api(request, **kwargs):\n import requests\n HTTP_HOST = request.META.get('HTTP_HOST')\n repositories_url = 'http://{}/api/repositories/'.format(HTTP_HOST)\n request = requests.get(repositories_url)\n try:\n request.raise_for_status()\n return True\n except requests.HTTPError as e:\n return False\n", "path": "bothub/health/checks.py"}, {"content": "import os\nimport dj_database_url\n\nfrom decouple import config\n\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = config(\n 'ALLOWED_HOSTS',\n default='*',\n cast=lambda v: [s.strip() for s in v.split(',')])\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'rest_framework.authtoken',\n 'django_filters',\n 'corsheaders',\n 'bothub.authentication',\n 'bothub.common',\n 'bothub.api',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'whitenoise.middleware.WhiteNoiseMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'bothub.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'bothub.wsgi.application'\n\n\n# Database\n\nDATABASES = {}\nDATABASES['default'] = dj_database_url.parse(\n config(\n 'DEFAULT_DATABASE',\n default='sqlite:///db.sqlite3'))\n\n\n# Auth\n\nAUTH_USER_MODEL = 'authentication.User'\n\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' +\n 'CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.' 
+\n 'NumericPasswordValidator',\n },\n]\n\n\n# Internationalization\n\nLANGUAGE_CODE = config('LANGUAGE_CODE', default='en-us')\n\nTIME_ZONE = config('TIME_ZONE', default='UTC')\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = config('STATIC_URL', default='/static/')\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')\n\nSTATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'\n\n\n# rest framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.' +\n 'LimitOffsetPagination',\n 'PAGE_SIZE': 20,\n 'DEFAULT_FILTER_BACKENDS': [\n 'django_filters.rest_framework.DjangoFilterBackend',\n ],\n 'DEFAULT_METADATA_CLASS': 'bothub.api.metadata.Metadata',\n}\n\n\n# cors headers\n\nCORS_ORIGIN_ALLOW_ALL = True\nCORS_URLS_REGEX = r'^/api/.*$'\n\n\n# mail\n\nenvvar_EMAIL_HOST = config('EMAIL_HOST', default=None)\n\nADMINS = config(\n 'ADMINS',\n default='',\n cast=lambda v: [\n (\n s.strip().split('|')[0],\n s.strip().split('|')[1],\n ) for s in v.split(',')] if v else [])\nEMAIL_SUBJECT_PREFIX = '[bothub] '\nDEFAULT_FROM_EMAIL = config(\n 'DEFAULT_FROM_EMAIL',\n default='webmaster@localhost')\nSERVER_EMAIL = config('SERVER_EMAIL', default='root@localhost')\n\nif envvar_EMAIL_HOST:\n EMAIL_HOST = envvar_EMAIL_HOST\n EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)\n EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')\n EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')\n EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=False, cast=bool)\n EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)\nelse:\n EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# webapp\n\nBOTHUB_WEBAPP_BASE_URL = config(\n 'BOTHUB_WEBAPP_BASE_URL',\n default='http://localhost:8080/')\n\n\n# NLP\n\nBOTHUB_NLP_BASE_URL = config(\n 'BOTHUB_NLP_BASE_URL',\n default='http://localhost:8001/')\n\n\n# CSRF\n\nCSRF_COOKIE_DOMAIN = config(\n 'CSRF_COOKIE_DOMAIN',\n default=None)\n\nCSRF_COOKIE_SECURE = config(\n 'CSRF_COOKIE_SECURE',\n default=False,\n cast=bool)\n", "path": "bothub/settings.py"}]} | 2,566 | 738 |
gh_patches_debug_10295 | rasdani/github-patches | git_diff | holoviz__panel-723 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scipy now required dependency?
The latest dev build of panel (panel-0.7.0a15-py_0) looks like it is trying to import scipy, but scipy is not among the required dependencies that are automatically installed through conda.
```
$ conda create -n panel -c pyviz/label/dev panel
...
$ conda activate panel
(panel) $ python
Python 3.7.4 (default, Aug 13 2019, 15:17:50)
[Clang 4.0.1 (tags/RELEASE_401/final)] :: Anaconda, Inc. on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import panel
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/__init__.py", line 6, in <module>
from . import links # noqa
File "/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/links.py", line 12, in <module>
from .pane.holoviews import HoloViews, generate_panel_bokeh_map, is_bokeh_element_plot
File "/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/pane/__init__.py", line 13, in <module>
from .holoviews import HoloViews # noqa
File "/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/pane/holoviews.py", line 20, in <module>
from ..widgets import Player
File "/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/widgets/__init__.py", line 12, in <module>
from .misc import Audio, VideoStream # noqa
File "/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/widgets/misc.py", line 14, in <module>
from scipy.io import wavfile
ModuleNotFoundError: No module named 'scipy'
```
</issue>
<code>
[start of panel/widgets/misc.py]
1 """
2 Miscellaneous widgets which do not fit into the other main categories.
3 """
4 from __future__ import absolute_import, division, unicode_literals
5
6 import os
7
8 from io import BytesIO
9 from base64 import b64encode
10 from six import string_types
11
12 import param
13 import numpy as np
14 from scipy.io import wavfile
15
16 from ..io.notebook import push
17 from ..io.state import state
18 from ..models import (Audio as _BkAudio,
19 VideoStream as _BkVideoStream)
20 from .base import Widget
21
22
23 class Audio(Widget):
24
25 loop = param.Boolean(default=False, doc="""
26 Whether the audio should loop""")
27
28 time = param.Number(default=0, doc="""
29 The current timestamp""")
30
31 throttle = param.Integer(default=250, doc="""
32 How frequently to sample the current playback time in milliseconds""")
33
34 paused = param.Boolean(default=True, doc="""
35 Whether the audio is currently paused""")
36
37 sample_rate = param.Integer(default=44100, doc="""
38 The sample_rate of the audio when given a NumPy array.""")
39
40 value = param.ClassSelector(default='', class_=(string_types + (np.ndarray,)), doc="""
41 The audio file either local or remote.""")
42
43 volume = param.Number(default=None, bounds=(0, 100), doc="""
44 The volume of the audio player.""")
45
46 _widget_type = _BkAudio
47
48 _rename = {'name': None, 'sample_rate': None}
49
50 def _from_numpy(self, data):
51 buffer = BytesIO()
52 wavfile.write(buffer, self.sample_rate, data)
53 return buffer
54
55 def _process_param_change(self, msg):
56 msg = super(Audio, self)._process_param_change(msg)
57
58 if 'value' in msg:
59 value = msg['value']
60 if isinstance(value, np.ndarray):
61 fmt = 'wav'
62 buffer = self._from_numpy(value)
63 data = b64encode(buffer.getvalue())
64 elif os.path.isfile(value):
65 fmt = value.split('.')[-1]
66 with open(value, 'rb') as f:
67 data = f.read()
68 data = b64encode(data)
69 elif value.lower().startswith('http'):
70 return msg
71 elif not value:
72 data, fmt = b'', 'wav'
73 else:
74 raise ValueError('Value should be either path to a sound file or numpy array')
75 template = 'data:audio/{mime};base64,{data}'
76 msg['value'] = template.format(data=data.decode('utf-8'),
77 mime=fmt)
78 return msg
79
80
81
82 class VideoStream(Widget):
83
84 format = param.ObjectSelector(default='png', objects=['png', 'jpeg'],
85 doc="""
86 The file format as which the video is returned.""")
87
88 paused = param.Boolean(default=False, doc="""
89 Whether the video is currently paused""")
90
91 timeout = param.Number(default=None, doc="""
92 Interval between snapshots in millisecons""")
93
94 value = param.String(default='', doc="""
95 A base64 representation of the video stream snapshot.""")
96
97 _widget_type = _BkVideoStream
98
99 _rename = {'name': None}
100
101 def snapshot(self):
102 """
103 Triggers a snapshot of the current VideoStream state to sync
104 the widget value.
105 """
106 for ref, (m, _) in self._models.items():
107 m.snapshot = not m.snapshot
108 (self, root, doc, comm) = state._views[ref]
109 if comm and 'embedded' not in root.tags:
110 push(doc, comm)
111
[end of panel/widgets/misc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/panel/widgets/misc.py b/panel/widgets/misc.py
--- a/panel/widgets/misc.py
+++ b/panel/widgets/misc.py
@@ -11,7 +11,6 @@
import param
import numpy as np
-from scipy.io import wavfile
from ..io.notebook import push
from ..io.state import state
@@ -48,6 +47,7 @@
_rename = {'name': None, 'sample_rate': None}
def _from_numpy(self, data):
+ from scipy.io import wavfile
buffer = BytesIO()
wavfile.write(buffer, self.sample_rate, data)
return buffer
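The fix is the usual deferred-import pattern: scipy is only needed when a NumPy array is actually converted to WAV, so importing the module no longer requires scipy at all. A standalone sketch of the same pattern:

```python
from io import BytesIO

import numpy as np

def numpy_to_wav(data: np.ndarray, sample_rate: int = 44100) -> BytesIO:
    from scipy.io import wavfile  # imported only when a conversion is requested
    buffer = BytesIO()
    wavfile.write(buffer, sample_rate, data)
    return buffer
```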
| {"golden_diff": "diff --git a/panel/widgets/misc.py b/panel/widgets/misc.py\n--- a/panel/widgets/misc.py\n+++ b/panel/widgets/misc.py\n@@ -11,7 +11,6 @@\n \n import param\n import numpy as np\n-from scipy.io import wavfile\n \n from ..io.notebook import push\n from ..io.state import state\n@@ -48,6 +47,7 @@\n _rename = {'name': None, 'sample_rate': None}\n \n def _from_numpy(self, data):\n+ from scipy.io import wavfile\n buffer = BytesIO()\n wavfile.write(buffer, self.sample_rate, data)\n return buffer\n", "issue": "Scipy now required dependency?\nThe latest dev build of panel (panel-0.7.0a15-py_0) looks like it is trying to import scipy, but scipy is not among the required dependencies that are automatically installed through conda.\r\n\r\n```\r\n$ conda create -n panel -c pyviz/label/dev panel\r\n...\r\n$ conda activate panel\r\n(panel) $ python\r\nPython 3.7.4 (default, Aug 13 2019, 15:17:50) \r\n[Clang 4.0.1 (tags/RELEASE_401/final)] :: Anaconda, Inc. on darwin\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import panel\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/__init__.py\", line 6, in <module>\r\n from . import links # noqa\r\n File \"/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/links.py\", line 12, in <module>\r\n from .pane.holoviews import HoloViews, generate_panel_bokeh_map, is_bokeh_element_plot\r\n File \"/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/pane/__init__.py\", line 13, in <module>\r\n from .holoviews import HoloViews # noqa\r\n File \"/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/pane/holoviews.py\", line 20, in <module>\r\n from ..widgets import Player\r\n File \"/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/widgets/__init__.py\", line 12, in <module>\r\n from .misc import Audio, VideoStream # noqa\r\n File \"/Users/rditlsc9/miniconda/envs/panel/lib/python3.7/site-packages/panel/widgets/misc.py\", line 14, in <module>\r\n from scipy.io import wavfile\r\nModuleNotFoundError: No module named 'scipy'\r\n```\n", "before_files": [{"content": "\"\"\"\nMiscellaneous widgets which do not fit into the other main categories.\n\"\"\"\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport os\n\nfrom io import BytesIO\nfrom base64 import b64encode\nfrom six import string_types\n\nimport param\nimport numpy as np\nfrom scipy.io import wavfile\n\nfrom ..io.notebook import push\nfrom ..io.state import state\nfrom ..models import (Audio as _BkAudio,\n VideoStream as _BkVideoStream)\nfrom .base import Widget\n\n\nclass Audio(Widget):\n\n loop = param.Boolean(default=False, doc=\"\"\"\n Whether the audio should loop\"\"\")\n\n time = param.Number(default=0, doc=\"\"\"\n The current timestamp\"\"\")\n\n throttle = param.Integer(default=250, doc=\"\"\"\n How frequently to sample the current playback time in milliseconds\"\"\")\n\n paused = param.Boolean(default=True, doc=\"\"\"\n Whether the audio is currently paused\"\"\")\n\n sample_rate = param.Integer(default=44100, doc=\"\"\"\n The sample_rate of the audio when given a NumPy array.\"\"\")\n\n value = param.ClassSelector(default='', class_=(string_types + (np.ndarray,)), doc=\"\"\"\n The audio file either local or remote.\"\"\")\n\n volume = param.Number(default=None, bounds=(0, 100), doc=\"\"\"\n The volume of the audio player.\"\"\")\n\n 
_widget_type = _BkAudio\n\n _rename = {'name': None, 'sample_rate': None}\n\n def _from_numpy(self, data):\n buffer = BytesIO()\n wavfile.write(buffer, self.sample_rate, data)\n return buffer\n\n def _process_param_change(self, msg):\n msg = super(Audio, self)._process_param_change(msg)\n\n if 'value' in msg:\n value = msg['value']\n if isinstance(value, np.ndarray):\n fmt = 'wav'\n buffer = self._from_numpy(value)\n data = b64encode(buffer.getvalue())\n elif os.path.isfile(value):\n fmt = value.split('.')[-1]\n with open(value, 'rb') as f:\n data = f.read()\n data = b64encode(data)\n elif value.lower().startswith('http'):\n return msg\n elif not value:\n data, fmt = b'', 'wav'\n else:\n raise ValueError('Value should be either path to a sound file or numpy array')\n template = 'data:audio/{mime};base64,{data}'\n msg['value'] = template.format(data=data.decode('utf-8'),\n mime=fmt)\n return msg\n\n\n\nclass VideoStream(Widget):\n\n format = param.ObjectSelector(default='png', objects=['png', 'jpeg'],\n doc=\"\"\"\n The file format as which the video is returned.\"\"\")\n\n paused = param.Boolean(default=False, doc=\"\"\"\n Whether the video is currently paused\"\"\")\n\n timeout = param.Number(default=None, doc=\"\"\"\n Interval between snapshots in millisecons\"\"\")\n\n value = param.String(default='', doc=\"\"\"\n A base64 representation of the video stream snapshot.\"\"\")\n\n _widget_type = _BkVideoStream\n\n _rename = {'name': None}\n\n def snapshot(self):\n \"\"\"\n Triggers a snapshot of the current VideoStream state to sync\n the widget value.\n \"\"\"\n for ref, (m, _) in self._models.items():\n m.snapshot = not m.snapshot\n (self, root, doc, comm) = state._views[ref]\n if comm and 'embedded' not in root.tags:\n push(doc, comm)\n", "path": "panel/widgets/misc.py"}]} | 2,057 | 143 |
gh_patches_debug_11102 | rasdani/github-patches | git_diff | ultralytics__yolov5-10359 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
clearml logs default args when using the `run` function
### Search before asking
- [X] I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar bug report.
### YOLOv5 Component
_No response_
### Bug
When you run training through the `run` function, as described here https://github.com/ultralytics/yolov5/blob/10c025d794ca395a2ca0b2a00aff65f3a92ecd8d/train.py#L622,
the ClearML argument logging at
https://github.com/allegroai/clearml/blob/7016138c849a4f8d0b4d296b319e0b23a1b7bd9e/clearml/binding/args.py#L231
skips the modification of the parameters.
As a result, when you look at the Configuration > Args tab on the ClearML server, you see the default argparse values and not the ones you actually trained with.
(Training itself still runs with the correct parameters you pass.)
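A minimal sketch of a possible workaround (an assumption on my side, not the official fix; it presumes the code that creates the ClearML task can see the final `opt` namespace) is to connect that namespace explicitly instead of relying only on the argparse auto-binding:

```python
from clearml import Task

task = Task.init(project_name='YOLOv5', task_name='Training')
# Report the values train.run() actually uses, not the argparse defaults.
task.connect(vars(opt), name='Args')  # `opt` stands for the resolved options namespace (assumed available)
```

With something like this, the values shown under Args would match what `run()` received.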
### Environment
Docker
`from ultralytics/yolov5:latest`
### Minimal Reproducible Example
```python
import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')
```
In the ClearML server, under Configuration > Args, you will see `weights='yolov5s.pt'` and `imgsz=640` instead of the values passed to `run`.
### Additional
_No response_
### Are you willing to submit a PR?
- [ ] Yes I'd like to help by submitting a PR!
</issue>
<code>
[start of utils/loggers/clearml/clearml_utils.py]
1 """Main Logger class for ClearML experiment tracking."""
2 import glob
3 import re
4 from pathlib import Path
5
6 import numpy as np
7 import yaml
8
9 from utils.plots import Annotator, colors
10
11 try:
12 import clearml
13 from clearml import Dataset, Task
14
15 assert hasattr(clearml, '__version__') # verify package import not local dir
16 except (ImportError, AssertionError):
17 clearml = None
18
19
20 def construct_dataset(clearml_info_string):
21 """Load in a clearml dataset and fill the internal data_dict with its contents.
22 """
23 dataset_id = clearml_info_string.replace('clearml://', '')
24 dataset = Dataset.get(dataset_id=dataset_id)
25 dataset_root_path = Path(dataset.get_local_copy())
26
27 # We'll search for the yaml file definition in the dataset
28 yaml_filenames = list(glob.glob(str(dataset_root_path / "*.yaml")) + glob.glob(str(dataset_root_path / "*.yml")))
29 if len(yaml_filenames) > 1:
30 raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains '
31 'the dataset definition this way.')
32 elif len(yaml_filenames) == 0:
33 raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file '
34 'inside the dataset root path.')
35 with open(yaml_filenames[0]) as f:
36 dataset_definition = yaml.safe_load(f)
37
38 assert set(dataset_definition.keys()).issuperset(
39 {'train', 'test', 'val', 'nc', 'names'}
40 ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')"
41
42 data_dict = dict()
43 data_dict['train'] = str(
44 (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None
45 data_dict['test'] = str(
46 (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None
47 data_dict['val'] = str(
48 (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None
49 data_dict['nc'] = dataset_definition['nc']
50 data_dict['names'] = dataset_definition['names']
51
52 return data_dict
53
54
55 class ClearmlLogger:
56 """Log training runs, datasets, models, and predictions to ClearML.
57
58 This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default,
59 this information includes hyperparameters, system configuration and metrics, model metrics, code information and
60 basic data metrics and analyses.
61
62 By providing additional command line arguments to train.py, datasets,
63 models and predictions can also be logged.
64 """
65
66 def __init__(self, opt, hyp):
67 """
68 - Initialize ClearML Task, this object will capture the experiment
69 - Upload dataset version to ClearML Data if opt.upload_dataset is True
70
71 arguments:
72 opt (namespace) -- Commandline arguments for this run
73 hyp (dict) -- Hyperparameters for this run
74
75 """
76 self.current_epoch = 0
77 # Keep tracked of amount of logged images to enforce a limit
78 self.current_epoch_logged_images = set()
79 # Maximum number of images to log to clearML per epoch
80 self.max_imgs_to_log_per_epoch = 16
81 # Get the interval of epochs when bounding box images should be logged
82 self.bbox_interval = opt.bbox_interval
83 self.clearml = clearml
84 self.task = None
85 self.data_dict = None
86 if self.clearml:
87 self.task = Task.init(
88 project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5',
89 task_name=opt.name if opt.name != 'exp' else 'Training',
90 tags=['YOLOv5'],
91 output_uri=True,
92 auto_connect_frameworks={'pytorch': False}
93 # We disconnect pytorch auto-detection, because we added manual model save points in the code
94 )
95 # ClearML's hooks will already grab all general parameters
96 # Only the hyperparameters coming from the yaml config file
97 # will have to be added manually!
98 self.task.connect(hyp, name='Hyperparameters')
99
100 # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent
101 self.task.set_base_docker("ultralytics/yolov5:latest",
102 docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"',
103 docker_setup_bash_script='pip install clearml')
104
105 # Get ClearML Dataset Version if requested
106 if opt.data.startswith('clearml://'):
107 # data_dict should have the following keys:
108 # names, nc (number of classes), test, train, val (all three relative paths to ../datasets)
109 self.data_dict = construct_dataset(opt.data)
110 # Set data to data_dict because wandb will crash without this information and opt is the best way
111 # to give it to them
112 opt.data = self.data_dict
113
114 def log_debug_samples(self, files, title='Debug Samples'):
115 """
116 Log files (images) as debug samples in the ClearML task.
117
118 arguments:
119 files (List(PosixPath)) a list of file paths in PosixPath format
120 title (str) A title that groups together images with the same values
121 """
122 for f in files:
123 if f.exists():
124 it = re.search(r'_batch(\d+)', f.name)
125 iteration = int(it.groups()[0]) if it else 0
126 self.task.get_logger().report_image(title=title,
127 series=f.name.replace(it.group(), ''),
128 local_path=str(f),
129 iteration=iteration)
130
131 def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25):
132 """
133 Draw the bounding boxes on a single image and report the result as a ClearML debug sample.
134
135 arguments:
136 image_path (PosixPath) the path the original image file
137 boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
138 class_names (dict): dict containing mapping of class int to class name
139 image (Tensor): A torch tensor containing the actual image data
140 """
141 if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:
142 # Log every bbox_interval times and deduplicate for any intermittend extra eval runs
143 if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:
144 im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))
145 annotator = Annotator(im=im, pil=True)
146 for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):
147 color = colors(i)
148
149 class_name = class_names[int(class_nr)]
150 confidence_percentage = round(float(conf) * 100, 2)
151 label = f"{class_name}: {confidence_percentage}%"
152
153 if conf > conf_threshold:
154 annotator.rectangle(box.cpu().numpy(), outline=color)
155 annotator.box_label(box.cpu().numpy(), label=label, color=color)
156
157 annotated_image = annotator.result()
158 self.task.get_logger().report_image(title='Bounding Boxes',
159 series=image_path.name,
160 iteration=self.current_epoch,
161 image=annotated_image)
162 self.current_epoch_logged_images.add(image_path)
163
[end of utils/loggers/clearml/clearml_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py
--- a/utils/loggers/clearml/clearml_utils.py
+++ b/utils/loggers/clearml/clearml_utils.py
@@ -96,6 +96,7 @@
# Only the hyperparameters coming from the yaml config file
# will have to be added manually!
self.task.connect(hyp, name='Hyperparameters')
+ self.task.connect(opt, name='Args')
# Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent
self.task.set_base_docker("ultralytics/yolov5:latest",
| {"golden_diff": "diff --git a/utils/loggers/clearml/clearml_utils.py b/utils/loggers/clearml/clearml_utils.py\n--- a/utils/loggers/clearml/clearml_utils.py\n+++ b/utils/loggers/clearml/clearml_utils.py\n@@ -96,6 +96,7 @@\n # Only the hyperparameters coming from the yaml config file\n # will have to be added manually!\n self.task.connect(hyp, name='Hyperparameters')\n+ self.task.connect(opt, name='Args')\n \n # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent\n self.task.set_base_docker(\"ultralytics/yolov5:latest\",\n", "issue": "clearml logs defaults args when use run function \n### Search before asking\n\n- [X] I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar bug report.\n\n\n### YOLOv5 Component\n\n_No response_\n\n### Bug\n\nwhen you want to run training with the `run` function as described here https://github.com/ultralytics/yolov5/blob/10c025d794ca395a2ca0b2a00aff65f3a92ecd8d/train.py#L622 \r\nthe clearml args logging \r\nhttps://github.com/allegroai/clearml/blob/7016138c849a4f8d0b4d296b319e0b23a1b7bd9e/clearml/binding/args.py#L231 \r\nskip the modification of the parameters \r\nin the end, when you look in the clearml server on configurations Args you see the default args parameters and not the one you trained with \r\n(Training is still with the good parameters you pass )\r\n\n\n### Environment\n\ndoker \r\n`from ultralytics/yolov5:latest`\n\n### Minimal Reproducible Example\n\npython\r\n```\r\nimport train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt')\r\n```\r\nin clearml server configurations Args you will see `weights='yolov5s.pt'` `imgsz=640`\n\n### Additional\n\n_No response_\n\n### Are you willing to submit a PR?\n\n- [ ] Yes I'd like to help by submitting a PR!\n", "before_files": [{"content": "\"\"\"Main Logger class for ClearML experiment tracking.\"\"\"\nimport glob\nimport re\nfrom pathlib import Path\n\nimport numpy as np\nimport yaml\n\nfrom utils.plots import Annotator, colors\n\ntry:\n import clearml\n from clearml import Dataset, Task\n\n assert hasattr(clearml, '__version__') # verify package import not local dir\nexcept (ImportError, AssertionError):\n clearml = None\n\n\ndef construct_dataset(clearml_info_string):\n \"\"\"Load in a clearml dataset and fill the internal data_dict with its contents.\n \"\"\"\n dataset_id = clearml_info_string.replace('clearml://', '')\n dataset = Dataset.get(dataset_id=dataset_id)\n dataset_root_path = Path(dataset.get_local_copy())\n\n # We'll search for the yaml file definition in the dataset\n yaml_filenames = list(glob.glob(str(dataset_root_path / \"*.yaml\")) + glob.glob(str(dataset_root_path / \"*.yml\")))\n if len(yaml_filenames) > 1:\n raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains '\n 'the dataset definition this way.')\n elif len(yaml_filenames) == 0:\n raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file '\n 'inside the dataset root path.')\n with open(yaml_filenames[0]) as f:\n dataset_definition = yaml.safe_load(f)\n\n assert set(dataset_definition.keys()).issuperset(\n {'train', 'test', 'val', 'nc', 'names'}\n ), \"The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')\"\n\n data_dict = dict()\n data_dict['train'] = str(\n (dataset_root_path / dataset_definition['train']).resolve()) if 
dataset_definition['train'] else None\n data_dict['test'] = str(\n (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None\n data_dict['val'] = str(\n (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None\n data_dict['nc'] = dataset_definition['nc']\n data_dict['names'] = dataset_definition['names']\n\n return data_dict\n\n\nclass ClearmlLogger:\n \"\"\"Log training runs, datasets, models, and predictions to ClearML.\n\n This logger sends information to ClearML at app.clear.ml or to your own hosted server. By default,\n this information includes hyperparameters, system configuration and metrics, model metrics, code information and\n basic data metrics and analyses.\n\n By providing additional command line arguments to train.py, datasets,\n models and predictions can also be logged.\n \"\"\"\n\n def __init__(self, opt, hyp):\n \"\"\"\n - Initialize ClearML Task, this object will capture the experiment\n - Upload dataset version to ClearML Data if opt.upload_dataset is True\n\n arguments:\n opt (namespace) -- Commandline arguments for this run\n hyp (dict) -- Hyperparameters for this run\n\n \"\"\"\n self.current_epoch = 0\n # Keep tracked of amount of logged images to enforce a limit\n self.current_epoch_logged_images = set()\n # Maximum number of images to log to clearML per epoch\n self.max_imgs_to_log_per_epoch = 16\n # Get the interval of epochs when bounding box images should be logged\n self.bbox_interval = opt.bbox_interval\n self.clearml = clearml\n self.task = None\n self.data_dict = None\n if self.clearml:\n self.task = Task.init(\n project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5',\n task_name=opt.name if opt.name != 'exp' else 'Training',\n tags=['YOLOv5'],\n output_uri=True,\n auto_connect_frameworks={'pytorch': False}\n # We disconnect pytorch auto-detection, because we added manual model save points in the code\n )\n # ClearML's hooks will already grab all general parameters\n # Only the hyperparameters coming from the yaml config file\n # will have to be added manually!\n self.task.connect(hyp, name='Hyperparameters')\n\n # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent\n self.task.set_base_docker(\"ultralytics/yolov5:latest\",\n docker_arguments='--ipc=host -e=\"CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1\"',\n docker_setup_bash_script='pip install clearml')\n\n # Get ClearML Dataset Version if requested\n if opt.data.startswith('clearml://'):\n # data_dict should have the following keys:\n # names, nc (number of classes), test, train, val (all three relative paths to ../datasets)\n self.data_dict = construct_dataset(opt.data)\n # Set data to data_dict because wandb will crash without this information and opt is the best way\n # to give it to them\n opt.data = self.data_dict\n\n def log_debug_samples(self, files, title='Debug Samples'):\n \"\"\"\n Log files (images) as debug samples in the ClearML task.\n\n arguments:\n files (List(PosixPath)) a list of file paths in PosixPath format\n title (str) A title that groups together images with the same values\n \"\"\"\n for f in files:\n if f.exists():\n it = re.search(r'_batch(\\d+)', f.name)\n iteration = int(it.groups()[0]) if it else 0\n self.task.get_logger().report_image(title=title,\n series=f.name.replace(it.group(), ''),\n local_path=str(f),\n iteration=iteration)\n\n def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25):\n 
\"\"\"\n Draw the bounding boxes on a single image and report the result as a ClearML debug sample.\n\n arguments:\n image_path (PosixPath) the path the original image file\n boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]\n class_names (dict): dict containing mapping of class int to class name\n image (Tensor): A torch tensor containing the actual image data\n \"\"\"\n if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0:\n # Log every bbox_interval times and deduplicate for any intermittend extra eval runs\n if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images:\n im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2))\n annotator = Annotator(im=im, pil=True)\n for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])):\n color = colors(i)\n\n class_name = class_names[int(class_nr)]\n confidence_percentage = round(float(conf) * 100, 2)\n label = f\"{class_name}: {confidence_percentage}%\"\n\n if conf > conf_threshold:\n annotator.rectangle(box.cpu().numpy(), outline=color)\n annotator.box_label(box.cpu().numpy(), label=label, color=color)\n\n annotated_image = annotator.result()\n self.task.get_logger().report_image(title='Bounding Boxes',\n series=image_path.name,\n iteration=self.current_epoch,\n image=annotated_image)\n self.current_epoch_logged_images.add(image_path)\n", "path": "utils/loggers/clearml/clearml_utils.py"}]} | 2,965 | 154 |
gh_patches_debug_41072 | rasdani/github-patches | git_diff | PaddlePaddle__PaddleSeg-1746 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
init_weight is never called in paddleseg/models/hrnet_contrast.py
In paddleseg/models/hrnet_contrast.py, `__init__()` never calls `init_weight`, so `hrnet_w48_contrast` cannot load the complete pretrained model.
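For illustration only, a minimal sketch of the usual pattern (my assumption, based on how other PaddleSeg model classes typically handle `pretrained`) would be to trigger the load at the end of `__init__`:

```python
# Sketch only (inside HRNetW48Contrast.__init__), not the actual patch:
self.pretrained = pretrained
self.init_weight()  # loads the full model from `pretrained` when a path/url is given
```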
</issue>
<code>
[start of paddleseg/models/hrnet_contrast.py]
1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import paddle
16 import paddle.nn as nn
17 import paddle.nn.functional as F
18
19 from paddleseg.cvlibs import manager
20 from paddleseg.models import layers
21 from paddleseg.utils import utils
22
23
24 @manager.MODELS.add_component
25 class HRNetW48Contrast(nn.Layer):
26 """
27 The HRNetW48Contrast implementation based on PaddlePaddle.
28
29 The original article refers to
30 Wenguan Wang, Tianfei Zhou, et al. "Exploring Cross-Image Pixel Contrast for Semantic Segmentation"
31 (https://arxiv.org/abs/2101.11939).
32
33 Args:
34 in_channels (int): The output dimensions of backbone.
35 num_classes (int): The unique number of target classes.
36 backbone (Paddle.nn.Layer): Backbone network, currently support HRNet_W48.
37 drop_prob (float): The probability of dropout.
38 proj_dim (int): The projection dimensions.
39 align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,
40 e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
41 pretrained (str, optional): The path or url of pretrained model. Default: None.
42 """
43 def __init__(self,
44 in_channels,
45 num_classes,
46 backbone,
47 drop_prob,
48 proj_dim,
49 align_corners=False,
50 pretrained=None):
51 super().__init__()
52 self.in_channels = in_channels
53 self.backbone = backbone
54 self.num_classes = num_classes
55 self.proj_dim = proj_dim
56 self.align_corners = align_corners
57 self.pretrained = pretrained
58
59 self.cls_head = nn.Sequential(
60 layers.ConvBNReLU(in_channels,
61 in_channels,
62 kernel_size=3,
63 stride=1,
64 padding=1),
65 nn.Dropout2D(drop_prob),
66 nn.Conv2D(in_channels,
67 num_classes,
68 kernel_size=1,
69 stride=1,
70 bias_attr=False),
71 )
72 self.proj_head = ProjectionHead(dim_in=in_channels,
73 proj_dim=self.proj_dim)
74
75 def init_weight(self):
76 if self.pretrained is not None:
77 utils.load_entire_model(self, self.pretrained)
78
79 def forward(self, x):
80 feats = self.backbone(x)[0]
81 out = self.cls_head(feats)
82 logit_list = []
83 if self.training:
84 emb = self.proj_head(feats)
85 logit_list.append(
86 F.interpolate(out,
87 paddle.shape(x)[2:],
88 mode='bilinear',
89 align_corners=self.align_corners))
90 logit_list.append({'seg': out, 'embed': emb})
91 else:
92 logit_list.append(
93 F.interpolate(out,
94 paddle.shape(x)[2:],
95 mode='bilinear',
96 align_corners=self.align_corners))
97 return logit_list
98
99
100 class ProjectionHead(nn.Layer):
101 """
102 The projection head used by contrast learning.
103 Args:
104 dim_in (int): The dimensions of input features.
105 proj_dim (int, optional): The output dimensions of projection head. Default: 256.
106 proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.
107 """
108 def __init__(self, dim_in, proj_dim=256, proj='convmlp'):
109 super(ProjectionHead, self).__init__()
110 if proj == 'linear':
111 self.proj = nn.Conv2D(dim_in, proj_dim, kernel_size=1)
112 elif proj == 'convmlp':
113 self.proj = nn.Sequential(
114 layers.ConvBNReLU(dim_in, dim_in, kernel_size=1),
115 nn.Conv2D(dim_in, proj_dim, kernel_size=1),
116 )
117 else:
118 raise ValueError(
119 "The type of project head only support 'linear' and 'convmlp', but got {}."
120 .format(proj))
121
122 def forward(self, x):
123 return F.normalize(self.proj(x), p=2, axis=1)
124
[end of paddleseg/models/hrnet_contrast.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/paddleseg/models/hrnet_contrast.py b/paddleseg/models/hrnet_contrast.py
--- a/paddleseg/models/hrnet_contrast.py
+++ b/paddleseg/models/hrnet_contrast.py
@@ -40,6 +40,7 @@
e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.
pretrained (str, optional): The path or url of pretrained model. Default: None.
"""
+
def __init__(self,
in_channels,
num_classes,
@@ -54,23 +55,23 @@
self.num_classes = num_classes
self.proj_dim = proj_dim
self.align_corners = align_corners
- self.pretrained = pretrained
self.cls_head = nn.Sequential(
- layers.ConvBNReLU(in_channels,
- in_channels,
- kernel_size=3,
- stride=1,
- padding=1),
+ layers.ConvBNReLU(
+ in_channels, in_channels, kernel_size=3, stride=1, padding=1),
nn.Dropout2D(drop_prob),
- nn.Conv2D(in_channels,
- num_classes,
- kernel_size=1,
- stride=1,
- bias_attr=False),
+ nn.Conv2D(
+ in_channels,
+ num_classes,
+ kernel_size=1,
+ stride=1,
+ bias_attr=False),
)
- self.proj_head = ProjectionHead(dim_in=in_channels,
- proj_dim=self.proj_dim)
+ self.proj_head = ProjectionHead(
+ dim_in=in_channels, proj_dim=self.proj_dim)
+
+ self.pretrained = pretrained
+ self.init_weight()
def init_weight(self):
if self.pretrained is not None:
@@ -83,17 +84,19 @@
if self.training:
emb = self.proj_head(feats)
logit_list.append(
- F.interpolate(out,
- paddle.shape(x)[2:],
- mode='bilinear',
- align_corners=self.align_corners))
+ F.interpolate(
+ out,
+ paddle.shape(x)[2:],
+ mode='bilinear',
+ align_corners=self.align_corners))
logit_list.append({'seg': out, 'embed': emb})
else:
logit_list.append(
- F.interpolate(out,
- paddle.shape(x)[2:],
- mode='bilinear',
- align_corners=self.align_corners))
+ F.interpolate(
+ out,
+ paddle.shape(x)[2:],
+ mode='bilinear',
+ align_corners=self.align_corners))
return logit_list
@@ -105,6 +108,7 @@
proj_dim (int, optional): The output dimensions of projection head. Default: 256.
proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.
"""
+
def __init__(self, dim_in, proj_dim=256, proj='convmlp'):
super(ProjectionHead, self).__init__()
if proj == 'linear':
| {"golden_diff": "diff --git a/paddleseg/models/hrnet_contrast.py b/paddleseg/models/hrnet_contrast.py\n--- a/paddleseg/models/hrnet_contrast.py\n+++ b/paddleseg/models/hrnet_contrast.py\n@@ -40,6 +40,7 @@\n e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.\n pretrained (str, optional): The path or url of pretrained model. Default: None.\n \"\"\"\n+\n def __init__(self,\n in_channels,\n num_classes,\n@@ -54,23 +55,23 @@\n self.num_classes = num_classes\n self.proj_dim = proj_dim\n self.align_corners = align_corners\n- self.pretrained = pretrained\n \n self.cls_head = nn.Sequential(\n- layers.ConvBNReLU(in_channels,\n- in_channels,\n- kernel_size=3,\n- stride=1,\n- padding=1),\n+ layers.ConvBNReLU(\n+ in_channels, in_channels, kernel_size=3, stride=1, padding=1),\n nn.Dropout2D(drop_prob),\n- nn.Conv2D(in_channels,\n- num_classes,\n- kernel_size=1,\n- stride=1,\n- bias_attr=False),\n+ nn.Conv2D(\n+ in_channels,\n+ num_classes,\n+ kernel_size=1,\n+ stride=1,\n+ bias_attr=False),\n )\n- self.proj_head = ProjectionHead(dim_in=in_channels,\n- proj_dim=self.proj_dim)\n+ self.proj_head = ProjectionHead(\n+ dim_in=in_channels, proj_dim=self.proj_dim)\n+\n+ self.pretrained = pretrained\n+ self.init_weight()\n \n def init_weight(self):\n if self.pretrained is not None:\n@@ -83,17 +84,19 @@\n if self.training:\n emb = self.proj_head(feats)\n logit_list.append(\n- F.interpolate(out,\n- paddle.shape(x)[2:],\n- mode='bilinear',\n- align_corners=self.align_corners))\n+ F.interpolate(\n+ out,\n+ paddle.shape(x)[2:],\n+ mode='bilinear',\n+ align_corners=self.align_corners))\n logit_list.append({'seg': out, 'embed': emb})\n else:\n logit_list.append(\n- F.interpolate(out,\n- paddle.shape(x)[2:],\n- mode='bilinear',\n- align_corners=self.align_corners))\n+ F.interpolate(\n+ out,\n+ paddle.shape(x)[2:],\n+ mode='bilinear',\n+ align_corners=self.align_corners))\n return logit_list\n \n \n@@ -105,6 +108,7 @@\n proj_dim (int, optional): The output dimensions of projection head. Default: 256.\n proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. Default: 'convmlp'.\n \"\"\"\n+\n def __init__(self, dim_in, proj_dim=256, proj='convmlp'):\n super(ProjectionHead, self).__init__()\n if proj == 'linear':\n", "issue": "paddleseg/models/hrnet_contrast.py \u4e2d\u6ca1\u6709\u6267\u884c init_weight\npaddleseg/models/hrnet_contrast.py \u4e2d__init__()\u6ca1\u6709\u6267\u884c init_weight\uff0c\u5bfc\u81f4hrnet_w48_contrast \u6ca1\u6cd5\u52a0\u8f7d\u5b8c\u6574\u7684\u6a21\u578b\n", "before_files": [{"content": "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\n\nfrom paddleseg.cvlibs import manager\nfrom paddleseg.models import layers\nfrom paddleseg.utils import utils\n\n\[email protected]_component\nclass HRNetW48Contrast(nn.Layer):\n \"\"\"\n The HRNetW48Contrast implementation based on PaddlePaddle.\n\n The original article refers to\n Wenguan Wang, Tianfei Zhou, et al. \"Exploring Cross-Image Pixel Contrast for Semantic Segmentation\"\n (https://arxiv.org/abs/2101.11939).\n\n Args:\n in_channels (int): The output dimensions of backbone.\n num_classes (int): The unique number of target classes.\n backbone (Paddle.nn.Layer): Backbone network, currently support HRNet_W48.\n drop_prob (float): The probability of dropout.\n proj_dim (int): The projection dimensions.\n align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even,\n e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False.\n pretrained (str, optional): The path or url of pretrained model. Default: None.\n \"\"\"\n def __init__(self,\n in_channels,\n num_classes,\n backbone,\n drop_prob,\n proj_dim,\n align_corners=False,\n pretrained=None):\n super().__init__()\n self.in_channels = in_channels\n self.backbone = backbone\n self.num_classes = num_classes\n self.proj_dim = proj_dim\n self.align_corners = align_corners\n self.pretrained = pretrained\n\n self.cls_head = nn.Sequential(\n layers.ConvBNReLU(in_channels,\n in_channels,\n kernel_size=3,\n stride=1,\n padding=1),\n nn.Dropout2D(drop_prob),\n nn.Conv2D(in_channels,\n num_classes,\n kernel_size=1,\n stride=1,\n bias_attr=False),\n )\n self.proj_head = ProjectionHead(dim_in=in_channels,\n proj_dim=self.proj_dim)\n\n def init_weight(self):\n if self.pretrained is not None:\n utils.load_entire_model(self, self.pretrained)\n\n def forward(self, x):\n feats = self.backbone(x)[0]\n out = self.cls_head(feats)\n logit_list = []\n if self.training:\n emb = self.proj_head(feats)\n logit_list.append(\n F.interpolate(out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n logit_list.append({'seg': out, 'embed': emb})\n else:\n logit_list.append(\n F.interpolate(out,\n paddle.shape(x)[2:],\n mode='bilinear',\n align_corners=self.align_corners))\n return logit_list\n\n\nclass ProjectionHead(nn.Layer):\n \"\"\"\n The projection head used by contrast learning.\n Args:\n dim_in (int): The dimensions of input features.\n proj_dim (int, optional): The output dimensions of projection head. Default: 256.\n proj (str, optional): The type of projection head, only support 'linear' and 'convmlp'. 
Default: 'convmlp'.\n \"\"\"\n def __init__(self, dim_in, proj_dim=256, proj='convmlp'):\n super(ProjectionHead, self).__init__()\n if proj == 'linear':\n self.proj = nn.Conv2D(dim_in, proj_dim, kernel_size=1)\n elif proj == 'convmlp':\n self.proj = nn.Sequential(\n layers.ConvBNReLU(dim_in, dim_in, kernel_size=1),\n nn.Conv2D(dim_in, proj_dim, kernel_size=1),\n )\n else:\n raise ValueError(\n \"The type of project head only support 'linear' and 'convmlp', but got {}.\"\n .format(proj))\n\n def forward(self, x):\n return F.normalize(self.proj(x), p=2, axis=1)\n", "path": "paddleseg/models/hrnet_contrast.py"}]} | 1,880 | 701 |
gh_patches_debug_9486 | rasdani/github-patches | git_diff | cal-itp__benefits-1092 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Page titles contain extra spaces, characters
Our page titles (the string that shows up e.g. in the browser tab) are a little funky. They contain a bunch of extra spaces and weird formatting. For example, the `dev` site has this in the HTML source:
```html
<title>
Transit Benefits: Introduction |
Cal-ITP
</title>
```
In the browser this isn't really a problem; extra spaces are truncated in the tabs:

However in certain situations, especially where links are expanded for previews (like in Social Media sites, comms apps like Slack, etc.), this extra spacing and weird formatting is more apparent:

## To Reproduce
Steps to reproduce the behavior:
1. Copy a link from the public facing Benefits app, e.g. https://benefits.calitp.org
2. Paste this link into an app that can expand links for previews, like Slack
3. See the weird title formatting in the expanded preview
## Expected behavior
* There shouldn't be any extra spaces in the titles
* We should confirm this is the pattern we want (I'm thinking...probably not?) `Transit Benefits: <Page Title> | Cal-ITP`
* Alternate suggestion: `<Page Title> | Cal-ITP Benefits` (see the sketch below)
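For illustration, a minimal sketch of that alternate pattern (a hypothetical helper, not the project's actual code; the real title is assembled in `Page.__init__` and the base template):

```python
# Hypothetical helper showing the intended format with no stray whitespace.
def page_title(title=None, suffix="Cal-ITP Benefits"):
    parts = [p.strip() for p in (title, suffix) if p]
    return " | ".join(parts)

page_title("Introduction")  # -> "Introduction | Cal-ITP Benefits"
page_title()                # -> "Cal-ITP Benefits"
```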
</issue>
<code>
[start of benefits/core/viewmodels.py]
1 """
2 The core application: view model definitions for the root of the webapp.
3 """
4 from django.utils.translation import pgettext, gettext_lazy as _
5 from django.urls import reverse
6
7 from benefits.core import models
8
9 from . import session
10
11
12 class Button:
13 """
14 Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):
15 * classes: str, str[]
16 * id: str
17 * fallback_text: str
18 * label: str
19 * text: str
20 * url: str
21 * target: str
22 * rel: str
23 """
24
25 def __init__(self, **kwargs):
26 classes = kwargs.get("classes", [])
27 if isinstance(classes, str):
28 classes = classes.split()
29
30 self.classes = ["btn", "btn-lg"]
31 self.classes.extend(classes)
32 self.id = kwargs.get("id")
33 self.fallback_text = kwargs.get("fallback_text")
34 self.label = kwargs.get("label")
35 self.text = kwargs.get("text", "Button")
36 self.url = kwargs.get("url")
37 self.target = kwargs.get("target")
38 self.rel = kwargs.get("rel")
39
40 @staticmethod
41 def agency_contact_links(agency):
42 """Create link buttons for agency contact information."""
43 return [
44 Button.link(classes="agency", label=agency.long_name, text=agency.phone, url=f"tel:{agency.phone}"),
45 Button.link(
46 classes="agency", text=agency.info_url, url=agency.info_url, target="_blank", rel="noopener noreferrer"
47 ),
48 ]
49
50 @staticmethod
51 def home(request, text=None):
52 """Create a button back to this session's origin."""
53 if text is None:
54 text = _("core.buttons.return_home")
55
56 return Button.primary(text=text, url=session.origin(request))
57
58 @staticmethod
59 def link(**kwargs):
60 classes = kwargs.pop("classes", [])
61 if isinstance(classes, str):
62 classes = classes.split(" ")
63 classes.insert(0, "btn-link")
64 return Button(classes=classes, **kwargs)
65
66 @staticmethod
67 def primary(**kwargs):
68 classes = kwargs.pop("classes", [])
69 if isinstance(classes, str):
70 classes = classes.split(" ")
71 classes.insert(0, "btn-primary")
72 return Button(classes=classes, **kwargs)
73
74 @staticmethod
75 def outline_primary(**kwargs):
76 classes = kwargs.pop("classes", [])
77 if isinstance(classes, str):
78 classes = classes.split(" ")
79 classes.insert(0, "btn-outline-primary")
80 return Button(classes=classes, **kwargs)
81
82 @staticmethod
83 def login(**kwargs):
84 """Create a login.gov button, with a login.gov logo and fallback text"""
85 btn = Button.primary(fallback_text="Login.gov", id="login", **kwargs)
86 return btn
87
88 @staticmethod
89 def logout(**kwargs):
90 """Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text"""
91 btn = Button.primary(fallback_text="Login.gov", id="login", url=reverse("oauth:logout"), text="", **kwargs)
92 return btn
93
94 @staticmethod
95 def previous_page(url):
96 return Button(text=_("core.buttons.previous_page"), url=url)
97
98
99 class Icon:
100 """Represents an icon."""
101
102 def __init__(self, icon, alt):
103 self.src = f"img/icon/{icon}.svg"
104 self.alt = alt
105
106
107 class MediaItem:
108 """
109 Represents a media item in a list of items:
110 * icon: core.viewmodels.Icon
111 * details: str, str[]
112 * heading: str
113 * bullets: str, str[]
114 """
115
116 def __init__(self, icon: Icon, details, heading=None, bullets=None):
117 self.icon = icon
118 if isinstance(details, str):
119 self.details = [details]
120 elif isinstance(details, list):
121 self.details = details
122 else:
123 self.details = [str(details)]
124 self.heading = heading
125 if isinstance(bullets, str):
126 self.bullets = [bullets]
127 elif isinstance(bullets, list):
128 self.bullets = bullets
129
130
131 class Page:
132 """
133 Represents a page of content:
134 * title: str
135 * icon: core.viewmodels.Icon
136 * headline: str
137 * paragraphs: str[]
138 * form: django.forms.Form
139 * forms: django.forms.Form[]
140 * button: core.viewmodels.Button
141 * buttons: core.viewmodels.Button[]
142 * classes: str[]
143 """
144
145 def __init__(self, **kwargs):
146 self.title = kwargs.get("title")
147 if self.title is None:
148 self.title = _("core.pages.index.prefix")
149 else:
150 self.title = f"{_('core.pages.index.prefix')}: {self.title}"
151
152 self.icon = kwargs.get("icon")
153 self.headline = kwargs.get("headline")
154 self.paragraphs = kwargs.get("paragraphs", [])
155 self.steps = kwargs.get("steps")
156
157 self.forms = kwargs.get("forms", [])
158 if not isinstance(self.forms, list):
159 self.forms = [self.forms]
160 if "form" in kwargs:
161 self.forms.append(kwargs.get("form"))
162
163 self.buttons = kwargs.get("buttons", [])
164 if not isinstance(self.buttons, list):
165 self.buttons = [self.buttons]
166 if "button" in kwargs:
167 self.buttons.append(kwargs.get("button"))
168
169 self.classes = kwargs.get("classes", [])
170 if not isinstance(self.classes, list):
171 self.classes = self.classes.split(" ")
172
173 def context_dict(self):
174 """Return a context dict for a Page."""
175 return {"page": self}
176
177
178 class ErrorPage(Page):
179 """
180 Represents an error page:
181 * title: str
182 * icon: core.viewmodels.Icon
183 * headline: str
184 * paragraphs: str[]
185 * button: core.viewmodels.Button
186 """
187
188 def __init__(self, **kwargs):
189 super().__init__(
190 title=kwargs.get("title", _("core.pages.error.title")),
191 icon=kwargs.get("icon", Icon("sadbus", pgettext("image alt text", "core.icons.sadbus"))),
192 headline=kwargs.get("headline", _("core.pages.error.title")),
193 paragraphs=kwargs.get("paragraphs", [_("core.pages.server_error.headline")]),
194 button=kwargs.get("button"),
195 )
196
197 @staticmethod
198 def user_error(
199 title=_("core.pages.user_error.title"),
200 headline=_("core.pages.user_error.headline"),
201 paragraphs=[_("core.pages.user_error.p[0]")],
202 **kwargs,
203 ):
204 """Create a new core.viewmodels.ErrorPage instance with defaults for a user error."""
205 return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)
206
207 @staticmethod
208 def server_error(
209 title=_("core.pages.server_error.title"),
210 headline=_("core.pages.server_error.title"),
211 paragraphs=[_("core.pages.server_error.p[0]")],
212 **kwargs,
213 ):
214 """Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error."""
215 return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)
216
217 @staticmethod
218 def not_found(
219 title=_("core.pages.not_found.title"),
220 headline=_("core.pages.not_found.headline"),
221 paragraphs=[_("core.pages.not_found.p[0]")],
222 **kwargs,
223 ):
224 """Create a new core.viewmodels.ErrorPage with defaults for a 404."""
225 path = kwargs.pop("path", None)
226 if path and title:
227 title = f"{title}: {path}"
228 elif path and not title:
229 title = path
230 return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)
231
232
233 class PaymentProcessor:
234 """
235 Represents a core.models.PaymentProcessor:
236 * model: core.models.PaymentProcessor
237 * access_token_url: str
238 * element_id: str
239 * color: str
240 * [name: str]
241 * [loading_text: str]
242 """
243
244 def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_("core.buttons.wait")):
245 if isinstance(model, models.PaymentProcessor):
246 self.access_token_url = access_token_url
247 self.element_id = element_id
248 self.color = color
249 self.name = name or model.name
250 self.loading_text = loading_text
251 self.card_tokenize_url = model.card_tokenize_url
252 self.card_tokenize_func = model.card_tokenize_func
253 self.card_tokenize_env = model.card_tokenize_env
254
255 def context_dict(self):
256 """Return a context dict for a PaymentProcessor."""
257 return {"payment_processor": self}
258
259
260 class TransitAgency:
261 """
262 Represents a core.models.TransitAgency:
263 * model: core.models.TransitAgency
264 """
265
266 def __init__(self, model):
267 if isinstance(model, models.TransitAgency):
268 self.slug = model.slug
269 self.short_name = model.short_name
270 self.long_name = model.long_name
271 self.agency_id = model.agency_id
272 self.merchant_id = model.merchant_id
273 self.info_url = model.info_url
274 self.phone = model.phone
275
276 def context_dict(self):
277 """Return a context dict for a TransitAgency."""
278 return {"agency": self}
279
[end of benefits/core/viewmodels.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/benefits/core/viewmodels.py b/benefits/core/viewmodels.py
--- a/benefits/core/viewmodels.py
+++ b/benefits/core/viewmodels.py
@@ -143,12 +143,9 @@
"""
def __init__(self, **kwargs):
- self.title = kwargs.get("title")
- if self.title is None:
- self.title = _("core.pages.index.prefix")
- else:
- self.title = f"{_('core.pages.index.prefix')}: {self.title}"
-
+ title = kwargs.get("title")
+ if title is not None:
+ self.title = title
self.icon = kwargs.get("icon")
self.headline = kwargs.get("headline")
self.paragraphs = kwargs.get("paragraphs", [])
| {"golden_diff": "diff --git a/benefits/core/viewmodels.py b/benefits/core/viewmodels.py\n--- a/benefits/core/viewmodels.py\n+++ b/benefits/core/viewmodels.py\n@@ -143,12 +143,9 @@\n \"\"\"\n \n def __init__(self, **kwargs):\n- self.title = kwargs.get(\"title\")\n- if self.title is None:\n- self.title = _(\"core.pages.index.prefix\")\n- else:\n- self.title = f\"{_('core.pages.index.prefix')}: {self.title}\"\n-\n+ title = kwargs.get(\"title\")\n+ if title is not None:\n+ self.title = title\n self.icon = kwargs.get(\"icon\")\n self.headline = kwargs.get(\"headline\")\n self.paragraphs = kwargs.get(\"paragraphs\", [])\n", "issue": "Page titles contain extra spaces, characters\nOur page titles (the string that shows up e.g. in the browser tab) are a little funky. They contain a bunch of extra spaces and weird formatting. For example, the `dev` site has this in the HTML source:\r\n\r\n```html\r\n<title>\r\n \r\n Transit Benefits: Introduction |\r\n \r\n Cal-ITP\r\n </title>\r\n```\r\n\r\nIn the browser this isn't really a problem; extra spaces are truncated in the tabs:\r\n\r\n\r\n\r\nHowever in certain situations, especially where links are expanded for previews (like in Social Media sites, comms apps like Slack, etc.), this extra spacing and weird formatting is more apparent:\r\n\r\n\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n1. Copy a link from the public facing Benefits app, e.g. https://benefits.calitp.org\r\n2. Paste this link into an app that can expands links for previews, like Slack\r\n3. See the weird title formatting in the expanded preview \r\n\r\n## Expected behavior\r\n\r\n* There shouldn't be any extra spaces in the titles\r\n* We should confirm this is the pattern we want (I'm thinking...probably not?) `Transit Benefits: <Page Title> | Cal-ITP`\r\n * Alternate suggestion: `<Page Title> | Cal-ITP Benefits`\r\n\n", "before_files": [{"content": "\"\"\"\nThe core application: view model definitions for the root of the webapp.\n\"\"\"\nfrom django.utils.translation import pgettext, gettext_lazy as _\nfrom django.urls import reverse\n\nfrom benefits.core import models\n\nfrom . 
import session\n\n\nclass Button:\n \"\"\"\n Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):\n * classes: str, str[]\n * id: str\n * fallback_text: str\n * label: str\n * text: str\n * url: str\n * target: str\n * rel: str\n \"\"\"\n\n def __init__(self, **kwargs):\n classes = kwargs.get(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split()\n\n self.classes = [\"btn\", \"btn-lg\"]\n self.classes.extend(classes)\n self.id = kwargs.get(\"id\")\n self.fallback_text = kwargs.get(\"fallback_text\")\n self.label = kwargs.get(\"label\")\n self.text = kwargs.get(\"text\", \"Button\")\n self.url = kwargs.get(\"url\")\n self.target = kwargs.get(\"target\")\n self.rel = kwargs.get(\"rel\")\n\n @staticmethod\n def agency_contact_links(agency):\n \"\"\"Create link buttons for agency contact information.\"\"\"\n return [\n Button.link(classes=\"agency\", label=agency.long_name, text=agency.phone, url=f\"tel:{agency.phone}\"),\n Button.link(\n classes=\"agency\", text=agency.info_url, url=agency.info_url, target=\"_blank\", rel=\"noopener noreferrer\"\n ),\n ]\n\n @staticmethod\n def home(request, text=None):\n \"\"\"Create a button back to this session's origin.\"\"\"\n if text is None:\n text = _(\"core.buttons.return_home\")\n\n return Button.primary(text=text, url=session.origin(request))\n\n @staticmethod\n def link(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-link\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def outline_primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-outline-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def login(**kwargs):\n \"\"\"Create a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", **kwargs)\n return btn\n\n @staticmethod\n def logout(**kwargs):\n \"\"\"Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", url=reverse(\"oauth:logout\"), text=\"\", **kwargs)\n return btn\n\n @staticmethod\n def previous_page(url):\n return Button(text=_(\"core.buttons.previous_page\"), url=url)\n\n\nclass Icon:\n \"\"\"Represents an icon.\"\"\"\n\n def __init__(self, icon, alt):\n self.src = f\"img/icon/{icon}.svg\"\n self.alt = alt\n\n\nclass MediaItem:\n \"\"\"\n Represents a media item in a list of items:\n * icon: core.viewmodels.Icon\n * details: str, str[]\n * heading: str\n * bullets: str, str[]\n \"\"\"\n\n def __init__(self, icon: Icon, details, heading=None, bullets=None):\n self.icon = icon\n if isinstance(details, str):\n self.details = [details]\n elif isinstance(details, list):\n self.details = details\n else:\n self.details = [str(details)]\n self.heading = heading\n if isinstance(bullets, str):\n self.bullets = [bullets]\n elif isinstance(bullets, list):\n self.bullets = bullets\n\n\nclass Page:\n \"\"\"\n Represents a page of content:\n * title: str\n * icon: core.viewmodels.Icon\n * headline: str\n * paragraphs: str[]\n * form: 
django.forms.Form\n * forms: django.forms.Form[]\n * button: core.viewmodels.Button\n * buttons: core.viewmodels.Button[]\n * classes: str[]\n \"\"\"\n\n def __init__(self, **kwargs):\n self.title = kwargs.get(\"title\")\n if self.title is None:\n self.title = _(\"core.pages.index.prefix\")\n else:\n self.title = f\"{_('core.pages.index.prefix')}: {self.title}\"\n\n self.icon = kwargs.get(\"icon\")\n self.headline = kwargs.get(\"headline\")\n self.paragraphs = kwargs.get(\"paragraphs\", [])\n self.steps = kwargs.get(\"steps\")\n\n self.forms = kwargs.get(\"forms\", [])\n if not isinstance(self.forms, list):\n self.forms = [self.forms]\n if \"form\" in kwargs:\n self.forms.append(kwargs.get(\"form\"))\n\n self.buttons = kwargs.get(\"buttons\", [])\n if not isinstance(self.buttons, list):\n self.buttons = [self.buttons]\n if \"button\" in kwargs:\n self.buttons.append(kwargs.get(\"button\"))\n\n self.classes = kwargs.get(\"classes\", [])\n if not isinstance(self.classes, list):\n self.classes = self.classes.split(\" \")\n\n def context_dict(self):\n \"\"\"Return a context dict for a Page.\"\"\"\n return {\"page\": self}\n\n\nclass ErrorPage(Page):\n \"\"\"\n Represents an error page:\n * title: str\n * icon: core.viewmodels.Icon\n * headline: str\n * paragraphs: str[]\n * button: core.viewmodels.Button\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(\n title=kwargs.get(\"title\", _(\"core.pages.error.title\")),\n icon=kwargs.get(\"icon\", Icon(\"sadbus\", pgettext(\"image alt text\", \"core.icons.sadbus\"))),\n headline=kwargs.get(\"headline\", _(\"core.pages.error.title\")),\n paragraphs=kwargs.get(\"paragraphs\", [_(\"core.pages.server_error.headline\")]),\n button=kwargs.get(\"button\"),\n )\n\n @staticmethod\n def user_error(\n title=_(\"core.pages.user_error.title\"),\n headline=_(\"core.pages.user_error.headline\"),\n paragraphs=[_(\"core.pages.user_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a user error.\"\"\"\n return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def server_error(\n title=_(\"core.pages.server_error.title\"),\n headline=_(\"core.pages.server_error.title\"),\n paragraphs=[_(\"core.pages.server_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error.\"\"\"\n return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def not_found(\n title=_(\"core.pages.not_found.title\"),\n headline=_(\"core.pages.not_found.headline\"),\n paragraphs=[_(\"core.pages.not_found.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage with defaults for a 404.\"\"\"\n path = kwargs.pop(\"path\", None)\n if path and title:\n title = f\"{title}: {path}\"\n elif path and not title:\n title = path\n return ErrorPage(title=title, headline=headline, paragraphs=paragraphs, **kwargs)\n\n\nclass PaymentProcessor:\n \"\"\"\n Represents a core.models.PaymentProcessor:\n * model: core.models.PaymentProcessor\n * access_token_url: str\n * element_id: str\n * color: str\n * [name: str]\n * [loading_text: str]\n \"\"\"\n\n def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_(\"core.buttons.wait\")):\n if isinstance(model, models.PaymentProcessor):\n self.access_token_url = access_token_url\n self.element_id = element_id\n self.color = color\n self.name = name or model.name\n self.loading_text = loading_text\n 
self.card_tokenize_url = model.card_tokenize_url\n self.card_tokenize_func = model.card_tokenize_func\n self.card_tokenize_env = model.card_tokenize_env\n\n def context_dict(self):\n \"\"\"Return a context dict for a PaymentProcessor.\"\"\"\n return {\"payment_processor\": self}\n\n\nclass TransitAgency:\n \"\"\"\n Represents a core.models.TransitAgency:\n * model: core.models.TransitAgency\n \"\"\"\n\n def __init__(self, model):\n if isinstance(model, models.TransitAgency):\n self.slug = model.slug\n self.short_name = model.short_name\n self.long_name = model.long_name\n self.agency_id = model.agency_id\n self.merchant_id = model.merchant_id\n self.info_url = model.info_url\n self.phone = model.phone\n\n def context_dict(self):\n \"\"\"Return a context dict for a TransitAgency.\"\"\"\n return {\"agency\": self}\n", "path": "benefits/core/viewmodels.py"}]} | 3,667 | 176 |
gh_patches_debug_17289 | rasdani/github-patches | git_diff | psychopy__psychopy-2031 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
PIL.Image fails in combineImageEmblem
When launching PsychoPy 3.0.0b on Windows, the following traceback is raised:
```
Traceback (most recent call last):
File "C:\a\Python\Scripts\psychopyApp.py", line 96, in <module>
start_app()
File "C:\a\Python\Scripts\psychopyApp.py", line 28, in start_app
app = PsychoPyApp(0, showSplash=showSplash)
File "C:\a\Python\lib\site-packages\psychopy\app\_psychopyApp.py", line 181, in __init__
self.onInit(testMode=testMode, **kwargs)
File "C:\a\Python\lib\site-packages\psychopy\app\_psychopyApp.py", line 307, in onInit
self.showCoder(fileList=scripts)
File "C:\a\Python\lib\site-packages\psychopy\app\_psychopyApp.py", line 499, in showCoder
files=fileList, app=self)
File "C:\a\Python\lib\site-packages\psychopy\app\coder\coder.py", line 1426, in __init__
self.makeToolbar() # must be before the paneManager for some reason
File "C:\a\Python\lib\site-packages\psychopy\app\coder\coder.py", line 2000, in makeToolbar
pavButtons.addPavloviaTools(buttons=['pavloviaSync', 'pavloviaSearch', 'pavloviaUser', ])
File "C:\a\Python\lib\site-packages\psychopy\app\pavlovia_ui\toolbar.py", line 33, in addPavloviaT
ools
emblem=join(rc, emblem), pos='bottom_right'))
File "C:\a\Python\lib\site-packages\psychopy\app\icons.py", line 69, in combineImageEmblem
main.paste(emblem, [x, y], mask=emblem)
File "C:\a\Python\lib\site-packages\PIL\Image.py", line 1339, in paste
box = box + (box[0]+size[0], box[1]+size[1])
TypeError: can only concatenate list (not "tuple") to list
```
This is fixed by changing the brackets at psychopy\app\icons.py:69 from

`main.paste(emblem, [x, y], mask=emblem)`

to

`main.paste(emblem, (x, y), mask=emblem)`
</issue>
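The failure is easy to reproduce in isolation. A minimal sketch, assuming only that Pillow is installed; the image sizes and paste position are placeholders, not values taken from PsychoPy:

```
from PIL import Image

main = Image.new("RGBA", (48, 48))
emblem = Image.new("RGBA", (16, 16))

# With the Pillow release shown in the traceback, a 2-item list as the box
# makes paste() build the 4-item box via list + tuple concatenation, so this
# line raises TypeError:
#   main.paste(emblem, [32, 32], mask=emblem)

# The same call with a tuple succeeds:
main.paste(emblem, (32, 32), mask=emblem)
```

Passing the position as a tuple also matches how Pillow documents the `box` argument, so the one-character change should behave the same on versions that tolerated lists.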
<code>
[start of psychopy/app/icons.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 # Part of the PsychoPy library
5 # Copyright (C) 2018 Jonathan Peirce
6 # Distributed under the terms of the GNU General Public License (GPL).
7
8 """utility classes for the Builder
9 """
10
11 from __future__ import absolute_import, division, print_function
12 from os.path import join, abspath, dirname
13
14 from pkg_resources import parse_version
15 from PIL import Image
16 import wx
17
18 from psychopy import experiment, prefs
19 from psychopy.experiment import components
20
21 resourcesPath = prefs.paths['resources']
22
23 def pilToBitmap(pil, scaleFactor=1.0):
24 if parse_version(wx.__version__) < parse_version('4.0.0a1'):
25 image = wx.EmptyImage(pil.size[0], pil.size[1])
26 else:
27 image = wx.Image(pil.size[0], pil.size[1])
28
29 # set the RGB values
30 if hasattr(pil, 'tobytes'):
31 image.SetData(pil.convert("RGB").tobytes())
32 image.SetAlphaBuffer(pil.convert("RGBA").tobytes()[3::4])
33 else:
34 image.SetData(pil.convert("RGB").tostring())
35 image.SetAlphaData(pil.convert("RGBA").tostring()[3::4])
36
37 image.Rescale(image.Width * scaleFactor, image.Height * scaleFactor)
38 return image.ConvertToBitmap() # wx.Image and wx.Bitmap are different
39
40
41 def combineImageEmblem(main, emblem, pos='top_left'):
42 """
43
44 Parameters
45 ----------
46 main: filename
47 emblem: filename
48 pos: str ('bottom_left' etc)
49 size: int (default=16)
50
51 Returns
52 -------
53 A wx.Bitmap of the combined image ready for use in wxButton
54 """
55 # load images if they aren't already loaded
56 main = Image.open(main).convert('RGBA') # might be grey or indexed colors
57 emblem = Image.open(emblem).convert('RGBA')
58 if 'bottom' in pos:
59 y = main.size[1] - emblem.size[1]
60 elif 'top' in pos:
61 y = 0
62 if 'right' in pos:
63 x = main.size[0] - emblem.size[0]
64 elif 'left' in pos:
65 x = 0
66 elif 'center' in pos:
67 x = int(main.size[0]/2-emblem.size[1]/2)
68
69 main.paste(emblem, [x, y], mask=emblem)
70 return pilToBitmap(main)
71
72 _allIcons = None
73
74
75 def getAllIcons(folderList=(), forceReload=False):
76 """load the icons for all the components
77 """
78 global _allIcons
79 if forceReload or _allIcons is None:
80 compons = experiment.getAllComponents(folderList)
81 _allIcons = {}
82 for thisName, thisCompon in compons.items():
83 if thisName in components.iconFiles:
84 _allIcons[thisName] = getIcons(components.iconFiles[thisName])
85 else:
86 _allIcons[thisName] = getIcons(None)
87 return _allIcons
88 else:
89 return _allIcons
90
91
92 def getIcons(filename=None):
93 """Creates wxBitmaps ``self.icon`` and ``self.iconAdd`` based on the the image.
94 The latter has a plus sign added over the top.
95
96 png files work best, but anything that wx.Image can import should be fine
97 """
98 icons = {}
99 if filename is None:
100 filename = join(resourcesPath, 'base.png')
101
102 # get the low-res version first
103 im = Image.open(filename)
104 icons['24'] = pilToBitmap(im, scaleFactor=0.5)
105 icons['24add'] = pilToBitmap(im, scaleFactor=0.5)
106 # try to find a 128x128 version
107 filename128 = filename[:-4]+'128.png'
108 if False: # TURN OFF FOR NOW os.path.isfile(filename128):
109 im = Image.open(filename128)
110 else:
111 im = Image.open(filename)
112 icons['48'] = pilToBitmap(im)
113 # add the plus sign
114 add = Image.open(join(resourcesPath, 'add.png'))
115 im.paste(add, [0, 0, add.size[0], add.size[1]], mask=add)
116 # im.paste(add, [im.size[0]-add.size[0], im.size[1]-add.size[1],
117 # im.size[0], im.size[1]], mask=add)
118 icons['48add'] = pilToBitmap(im)
119
120 return icons
[end of psychopy/app/icons.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/psychopy/app/icons.py b/psychopy/app/icons.py
--- a/psychopy/app/icons.py
+++ b/psychopy/app/icons.py
@@ -66,7 +66,7 @@
elif 'center' in pos:
x = int(main.size[0]/2-emblem.size[1]/2)
- main.paste(emblem, [x, y], mask=emblem)
+ main.paste(emblem, (x, y), mask=emblem)
return pilToBitmap(main)
_allIcons = None
@@ -112,7 +112,7 @@
icons['48'] = pilToBitmap(im)
# add the plus sign
add = Image.open(join(resourcesPath, 'add.png'))
- im.paste(add, [0, 0, add.size[0], add.size[1]], mask=add)
+ im.paste(add, (0, 0, add.size[0], add.size[1]), mask=add)
# im.paste(add, [im.size[0]-add.size[0], im.size[1]-add.size[1],
# im.size[0], im.size[1]], mask=add)
icons['48add'] = pilToBitmap(im)
| {"golden_diff": "diff --git a/psychopy/app/icons.py b/psychopy/app/icons.py\n--- a/psychopy/app/icons.py\n+++ b/psychopy/app/icons.py\n@@ -66,7 +66,7 @@\n elif 'center' in pos:\n x = int(main.size[0]/2-emblem.size[1]/2)\n \n- main.paste(emblem, [x, y], mask=emblem)\n+ main.paste(emblem, (x, y), mask=emblem)\n return pilToBitmap(main)\n \n _allIcons = None\n@@ -112,7 +112,7 @@\n icons['48'] = pilToBitmap(im)\n # add the plus sign\n add = Image.open(join(resourcesPath, 'add.png'))\n- im.paste(add, [0, 0, add.size[0], add.size[1]], mask=add)\n+ im.paste(add, (0, 0, add.size[0], add.size[1]), mask=add)\n # im.paste(add, [im.size[0]-add.size[0], im.size[1]-add.size[1],\n # im.size[0], im.size[1]], mask=add)\n icons['48add'] = pilToBitmap(im)\n", "issue": "PIL.Image fail in combineImageEmblem\nWhen launching 3.0.0.b in Windows\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\a\\Python\\Scripts\\psychopyApp.py\", line 96, in <module>\r\n start_app()\r\n File \"C:\\a\\Python\\Scripts\\psychopyApp.py\", line 28, in start_app\r\n app = PsychoPyApp(0, showSplash=showSplash)\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\_psychopyApp.py\", line 181, in __init__\r\n self.onInit(testMode=testMode, **kwargs)\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\_psychopyApp.py\", line 307, in onInit\r\n self.showCoder(fileList=scripts)\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\_psychopyApp.py\", line 499, in showCoder\r\n files=fileList, app=self)\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\coder\\coder.py\", line 1426, in __init__\r\n self.makeToolbar() # must be before the paneManager for some reason\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\coder\\coder.py\", line 2000, in makeToolbar\r\n pavButtons.addPavloviaTools(buttons=['pavloviaSync', 'pavloviaSearch', 'pavloviaUser', ])\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\pavlovia_ui\\toolbar.py\", line 33, in addPavloviaT\r\nools\r\n emblem=join(rc, emblem), pos='bottom_right'))\r\n File \"C:\\a\\Python\\lib\\site-packages\\psychopy\\app\\icons.py\", line 69, in combineImageEmblem\r\n main.paste(emblem, [x, y], mask=emblem)\r\n File \"C:\\a\\Python\\lib\\site-packages\\PIL\\Image.py\", line 1339, in paste\r\n box = box + (box[0]+size[0], box[1]+size[1])\r\nTypeError: can only concatenate list (not \"tuple\") to list\r\n```\r\n\r\n\r\nIs fixed by change of brackets at psychopy\\app\\icons.py:69 :\r\n`\r\nmain.paste(emblem, [x, y], mask=emblem)`\r\n\r\nto \r\n`\r\nmain.paste(emblem, (x, y), mask=emblem)`\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Part of the PsychoPy library\n# Copyright (C) 2018 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\n\"\"\"utility classes for the Builder\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\nfrom os.path import join, abspath, dirname\n\nfrom pkg_resources import parse_version\nfrom PIL import Image\nimport wx\n\nfrom psychopy import experiment, prefs\nfrom psychopy.experiment import components\n\nresourcesPath = prefs.paths['resources']\n\ndef pilToBitmap(pil, scaleFactor=1.0):\n if parse_version(wx.__version__) < parse_version('4.0.0a1'):\n image = wx.EmptyImage(pil.size[0], pil.size[1])\n else:\n image = wx.Image(pil.size[0], pil.size[1])\n\n # set the RGB values\n if hasattr(pil, 'tobytes'):\n image.SetData(pil.convert(\"RGB\").tobytes())\n 
image.SetAlphaBuffer(pil.convert(\"RGBA\").tobytes()[3::4])\n else:\n image.SetData(pil.convert(\"RGB\").tostring())\n image.SetAlphaData(pil.convert(\"RGBA\").tostring()[3::4])\n\n image.Rescale(image.Width * scaleFactor, image.Height * scaleFactor)\n return image.ConvertToBitmap() # wx.Image and wx.Bitmap are different\n\n\ndef combineImageEmblem(main, emblem, pos='top_left'):\n \"\"\"\n\n Parameters\n ----------\n main: filename\n emblem: filename\n pos: str ('bottom_left' etc)\n size: int (default=16)\n\n Returns\n -------\n A wx.Bitmap of the combined image ready for use in wxButton\n \"\"\"\n # load images if they aren't already loaded\n main = Image.open(main).convert('RGBA') # might be grey or indexed colors\n emblem = Image.open(emblem).convert('RGBA')\n if 'bottom' in pos:\n y = main.size[1] - emblem.size[1]\n elif 'top' in pos:\n y = 0\n if 'right' in pos:\n x = main.size[0] - emblem.size[0]\n elif 'left' in pos:\n x = 0\n elif 'center' in pos:\n x = int(main.size[0]/2-emblem.size[1]/2)\n\n main.paste(emblem, [x, y], mask=emblem)\n return pilToBitmap(main)\n\n_allIcons = None\n\n\ndef getAllIcons(folderList=(), forceReload=False):\n \"\"\"load the icons for all the components\n \"\"\"\n global _allIcons\n if forceReload or _allIcons is None:\n compons = experiment.getAllComponents(folderList)\n _allIcons = {}\n for thisName, thisCompon in compons.items():\n if thisName in components.iconFiles:\n _allIcons[thisName] = getIcons(components.iconFiles[thisName])\n else:\n _allIcons[thisName] = getIcons(None)\n return _allIcons\n else:\n return _allIcons\n\n\ndef getIcons(filename=None):\n \"\"\"Creates wxBitmaps ``self.icon`` and ``self.iconAdd`` based on the the image.\n The latter has a plus sign added over the top.\n\n png files work best, but anything that wx.Image can import should be fine\n \"\"\"\n icons = {}\n if filename is None:\n filename = join(resourcesPath, 'base.png')\n\n # get the low-res version first\n im = Image.open(filename)\n icons['24'] = pilToBitmap(im, scaleFactor=0.5)\n icons['24add'] = pilToBitmap(im, scaleFactor=0.5)\n # try to find a 128x128 version\n filename128 = filename[:-4]+'128.png'\n if False: # TURN OFF FOR NOW os.path.isfile(filename128):\n im = Image.open(filename128)\n else:\n im = Image.open(filename)\n icons['48'] = pilToBitmap(im)\n # add the plus sign\n add = Image.open(join(resourcesPath, 'add.png'))\n im.paste(add, [0, 0, add.size[0], add.size[1]], mask=add)\n # im.paste(add, [im.size[0]-add.size[0], im.size[1]-add.size[1],\n # im.size[0], im.size[1]], mask=add)\n icons['48add'] = pilToBitmap(im)\n\n return icons", "path": "psychopy/app/icons.py"}]} | 2,386 | 280 |
gh_patches_debug_3527 | rasdani/github-patches | git_diff | qtile__qtile-738 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
lib*.so references in pangocffi.py
When upgrading from 0.9.1 to 0.10.1, I needed to modify the following references for my system (Ubuntu Vivid) in libqtile/pangocffi.py
gobject = ffi.dlopen('libgobject-2.0.so')
pango = ffi.dlopen('libpango-1.0.so')
pangocairo = ffi.dlopen('libpangocairo-1.0.so')
</issue>
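The underlying packaging detail: on Debian/Ubuntu-style systems the unversioned `.so` names are usually symlinks shipped by the `-dev` packages, while the versioned `.so.0` sonames are what a plain runtime install provides. A minimal sketch of loading the versioned names, assuming cffi and the GLib/Pango runtime libraries are present; it mirrors the dlopen calls quoted above:

```
from cffi import FFI

ffi = FFI()

# The unversioned names may be absent without the -dev packages installed;
# the versioned sonames are present on a stock runtime install.
gobject = ffi.dlopen("libgobject-2.0.so.0")
pango = ffi.dlopen("libpango-1.0.so.0")
pangocairo = ffi.dlopen("libpangocairo-1.0.so.0")
```

Loading by soname is the usual convention for runtime bindings, since the soname is what the dynamic linker records in dependent binaries.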
<code>
[start of libqtile/pangocffi.py]
1 # Copyright (c) 2014-2015 Sean Vig
2 # Copyright (c) 2014 roger
3 # Copyright (c) 2014 Tycho Andersen
4 # Copyright (c) 2015 Craig Barnes
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 # SOFTWARE.
23
24 # This module is kind of a hack; you've been warned :-). Some upstream work
25 # needs to happen in order to avoid doing this, though.
26 #
27 # The problem is that we want to use pango to draw stuff. We need to create a
28 # cairo surface, in particular an XCB surface. Since we're using xcffib as the
29 # XCB binding and there is no portable way to go from cffi's PyObject* cdata
30 # wrappers to the wrapped type [1], we can't add support to pycairo for XCB
31 # surfaces via xcffib.
32 #
33 # A similar problem exists one layer of indirection down with cairocffi --
34 # python's pangocairo is almost all C, and only works by including pycairo's
35 # headers and accessing members of structs only available in C, and not in
36 # python. Since cairocffi is pure python and also cffi based, we cannot extract
37 # the raw pointer to pass to the existing pangocairo bindings.
38 #
39 # The solution here is to implement a tiny pangocffi for the small set of pango
40 # functions we call. We're doing it directly here because we can, but it would
41 # not be difficult to use more upstream libraries (e.g. cairocffi and some
42 # pangocairocffi when it exists). This also allows us to drop pygtk entirely,
43 # since we are doing our own pango binding.
44 #
45 # [1]: https://groups.google.com/forum/#!topic/python-cffi/SPND0rRmazA
46 #
47 # This is not intended to be a complete cffi-based pango binding.
48
49 import six
50
51 # PyPy < 2.6 compatibility
52 try:
53 from libqtile._ffi_pango import ffi
54 except ImportError:
55 from libqtile.ffi_build import pango_ffi as ffi
56
57 gobject = ffi.dlopen('libgobject-2.0.so')
58 pango = ffi.dlopen('libpango-1.0.so')
59 pangocairo = ffi.dlopen('libpangocairo-1.0.so')
60
61
62 def CairoContext(cairo_t):
63 def create_layout():
64 return PangoLayout(cairo_t._pointer)
65 cairo_t.create_layout = create_layout
66
67 def show_layout(layout):
68 pangocairo.pango_cairo_show_layout(cairo_t._pointer, layout._pointer)
69 cairo_t.show_layout = show_layout
70
71 return cairo_t
72
73 ALIGN_CENTER = pango.PANGO_ALIGN_CENTER
74 ELLIPSIZE_END = pango.PANGO_ELLIPSIZE_END
75 units_from_double = pango.pango_units_from_double
76
77
78 def _const_char_to_py_str(cc):
79 return ''.join(ffi.buffer(cc, len(cc)))
80
81
82 class PangoLayout(object):
83 def __init__(self, cairo_t):
84 self._cairo_t = cairo_t
85 self._pointer = pangocairo.pango_cairo_create_layout(cairo_t)
86
87 def free(p):
88 p = ffi.cast("gpointer", p)
89 gobject.g_object_unref(p)
90 self._pointer = ffi.gc(self._pointer, free)
91
92 def finalize(self):
93 self._desc = None
94 self._pointer = None
95 self._cairo_t = None
96
97 def set_font_description(self, desc):
98 # save a pointer so it doesn't get GC'd out from under us
99 self._desc = desc
100 pango.pango_layout_set_font_description(self._pointer, desc._pointer)
101
102 def get_font_description(self):
103 descr = pango.pango_layout_get_font_description(self._pointer)
104 return FontDescription(descr)
105
106 def set_alignment(self, alignment):
107 pango.pango_layout_set_alignment(self._pointer, alignment)
108
109 def set_attributes(self, attrs):
110 pango.pango_layout_set_attributes(self._pointer, attrs)
111
112 def set_text(self, text):
113 text = text.encode('utf-8')
114 pango.pango_layout_set_text(self._pointer, text, -1)
115
116 def get_text(self):
117 ret = pango.pango_layout_get_text(self._pointer)
118 return _const_char_to_py_str(ret)
119
120 def set_ellipsize(self, ellipzize):
121 pango.pango_layout_set_ellipsize(self._pointer, ellipzize)
122
123 def get_ellipsize(self):
124 return pango.pango_layout_get_ellipsize(self._pointer)
125
126 def get_pixel_size(self):
127 width = ffi.new("int[1]")
128 height = ffi.new("int[1]")
129
130 pango.pango_layout_get_pixel_size(self._pointer, width, height)
131
132 return width[0], height[0]
133
134 def set_width(self, width):
135 pango.pango_layout_set_width(self._pointer, width)
136
137
138 class FontDescription(object):
139 def __init__(self, pointer=None):
140 if pointer is None:
141 self._pointer = pango.pango_font_description_new()
142 self._pointer = ffi.gc(self._pointer, pango.pango_font_description_free)
143 else:
144 self._pointer = pointer
145
146 @classmethod
147 def from_string(cls, string):
148 pointer = pango.pango_font_description_from_string(string.encode())
149 pointer = ffi.gc(pointer, pango.pango_font_description_free)
150 return cls(pointer)
151
152 def set_family(self, family):
153 pango.pango_font_description_set_family(self._pointer, family.encode())
154
155 def get_family(self):
156 ret = pango.pango_font_description_get_family(self._pointer)
157 return _const_char_to_py_str(ret)
158
159 def set_absolute_size(self, size):
160 pango.pango_font_description_set_absolute_size(self._pointer, size)
161
162 def set_size(self, size):
163 pango.pango_font_description_set_size(self._pointer, size)
164
165 def get_size(self, size):
166 return pango.pango_font_description_get_size(self._pointer, size)
167
168
169 def parse_markup(value, accel_marker=0):
170 attr_list = ffi.new("PangoAttrList**")
171 text = ffi.new("char**")
172 error = ffi.new("GError**")
173 if six.PY3:
174 value = value.encode()
175
176 ret = pango.pango_parse_markup(value, -1, accel_marker, attr_list, text, ffi.NULL, error)
177
178 if ret == 0:
179 raise Exception("parse_markup() failed for %s" % value)
180
181 return attr_list[0], ffi.string(text[0]), six.unichr(accel_marker)
182
183
184 def markup_escape_text(text):
185 ret = gobject.g_markup_escape_text(text.encode(), -1)
186 if six.PY3:
187 return ffi.string(ret).decode()
188 return ffi.string(ret)
189
[end of libqtile/pangocffi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/libqtile/pangocffi.py b/libqtile/pangocffi.py
--- a/libqtile/pangocffi.py
+++ b/libqtile/pangocffi.py
@@ -54,9 +54,9 @@
except ImportError:
from libqtile.ffi_build import pango_ffi as ffi
-gobject = ffi.dlopen('libgobject-2.0.so')
-pango = ffi.dlopen('libpango-1.0.so')
-pangocairo = ffi.dlopen('libpangocairo-1.0.so')
+gobject = ffi.dlopen('libgobject-2.0.so.0')
+pango = ffi.dlopen('libpango-1.0.so.0')
+pangocairo = ffi.dlopen('libpangocairo-1.0.so.0')
def CairoContext(cairo_t):
| {"golden_diff": "diff --git a/libqtile/pangocffi.py b/libqtile/pangocffi.py\n--- a/libqtile/pangocffi.py\n+++ b/libqtile/pangocffi.py\n@@ -54,9 +54,9 @@\n except ImportError:\n from libqtile.ffi_build import pango_ffi as ffi\n \n-gobject = ffi.dlopen('libgobject-2.0.so')\n-pango = ffi.dlopen('libpango-1.0.so')\n-pangocairo = ffi.dlopen('libpangocairo-1.0.so')\n+gobject = ffi.dlopen('libgobject-2.0.so.0')\n+pango = ffi.dlopen('libpango-1.0.so.0')\n+pangocairo = ffi.dlopen('libpangocairo-1.0.so.0')\n \n \n def CairoContext(cairo_t):\n", "issue": "lib*.so references in pangocffi.py\nWhen upgrading from 0.9.1 to 0.10.1, I needed to modify the following references for my system (Ubuntu Vivid) in libqtile/pangocffi.py\n\ngobject = ffi.dlopen('libgobject-2.0.so')\npango = ffi.dlopen('libpango-1.0.so')\npangocairo = ffi.dlopen('libpangocairo-1.0.so')\n\n", "before_files": [{"content": "# Copyright (c) 2014-2015 Sean Vig\n# Copyright (c) 2014 roger\n# Copyright (c) 2014 Tycho Andersen\n# Copyright (c) 2015 Craig Barnes\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# This module is kind of a hack; you've been warned :-). Some upstream work\n# needs to happen in order to avoid doing this, though.\n#\n# The problem is that we want to use pango to draw stuff. We need to create a\n# cairo surface, in particular an XCB surface. Since we're using xcffib as the\n# XCB binding and there is no portable way to go from cffi's PyObject* cdata\n# wrappers to the wrapped type [1], we can't add support to pycairo for XCB\n# surfaces via xcffib.\n#\n# A similar problem exists one layer of indirection down with cairocffi --\n# python's pangocairo is almost all C, and only works by including pycairo's\n# headers and accessing members of structs only available in C, and not in\n# python. Since cairocffi is pure python and also cffi based, we cannot extract\n# the raw pointer to pass to the existing pangocairo bindings.\n#\n# The solution here is to implement a tiny pangocffi for the small set of pango\n# functions we call. We're doing it directly here because we can, but it would\n# not be difficult to use more upstream libraries (e.g. cairocffi and some\n# pangocairocffi when it exists). 
This also allows us to drop pygtk entirely,\n# since we are doing our own pango binding.\n#\n# [1]: https://groups.google.com/forum/#!topic/python-cffi/SPND0rRmazA\n#\n# This is not intended to be a complete cffi-based pango binding.\n\nimport six\n\n# PyPy < 2.6 compatibility\ntry:\n from libqtile._ffi_pango import ffi\nexcept ImportError:\n from libqtile.ffi_build import pango_ffi as ffi\n\ngobject = ffi.dlopen('libgobject-2.0.so')\npango = ffi.dlopen('libpango-1.0.so')\npangocairo = ffi.dlopen('libpangocairo-1.0.so')\n\n\ndef CairoContext(cairo_t):\n def create_layout():\n return PangoLayout(cairo_t._pointer)\n cairo_t.create_layout = create_layout\n\n def show_layout(layout):\n pangocairo.pango_cairo_show_layout(cairo_t._pointer, layout._pointer)\n cairo_t.show_layout = show_layout\n\n return cairo_t\n\nALIGN_CENTER = pango.PANGO_ALIGN_CENTER\nELLIPSIZE_END = pango.PANGO_ELLIPSIZE_END\nunits_from_double = pango.pango_units_from_double\n\n\ndef _const_char_to_py_str(cc):\n return ''.join(ffi.buffer(cc, len(cc)))\n\n\nclass PangoLayout(object):\n def __init__(self, cairo_t):\n self._cairo_t = cairo_t\n self._pointer = pangocairo.pango_cairo_create_layout(cairo_t)\n\n def free(p):\n p = ffi.cast(\"gpointer\", p)\n gobject.g_object_unref(p)\n self._pointer = ffi.gc(self._pointer, free)\n\n def finalize(self):\n self._desc = None\n self._pointer = None\n self._cairo_t = None\n\n def set_font_description(self, desc):\n # save a pointer so it doesn't get GC'd out from under us\n self._desc = desc\n pango.pango_layout_set_font_description(self._pointer, desc._pointer)\n\n def get_font_description(self):\n descr = pango.pango_layout_get_font_description(self._pointer)\n return FontDescription(descr)\n\n def set_alignment(self, alignment):\n pango.pango_layout_set_alignment(self._pointer, alignment)\n\n def set_attributes(self, attrs):\n pango.pango_layout_set_attributes(self._pointer, attrs)\n\n def set_text(self, text):\n text = text.encode('utf-8')\n pango.pango_layout_set_text(self._pointer, text, -1)\n\n def get_text(self):\n ret = pango.pango_layout_get_text(self._pointer)\n return _const_char_to_py_str(ret)\n\n def set_ellipsize(self, ellipzize):\n pango.pango_layout_set_ellipsize(self._pointer, ellipzize)\n\n def get_ellipsize(self):\n return pango.pango_layout_get_ellipsize(self._pointer)\n\n def get_pixel_size(self):\n width = ffi.new(\"int[1]\")\n height = ffi.new(\"int[1]\")\n\n pango.pango_layout_get_pixel_size(self._pointer, width, height)\n\n return width[0], height[0]\n\n def set_width(self, width):\n pango.pango_layout_set_width(self._pointer, width)\n\n\nclass FontDescription(object):\n def __init__(self, pointer=None):\n if pointer is None:\n self._pointer = pango.pango_font_description_new()\n self._pointer = ffi.gc(self._pointer, pango.pango_font_description_free)\n else:\n self._pointer = pointer\n\n @classmethod\n def from_string(cls, string):\n pointer = pango.pango_font_description_from_string(string.encode())\n pointer = ffi.gc(pointer, pango.pango_font_description_free)\n return cls(pointer)\n\n def set_family(self, family):\n pango.pango_font_description_set_family(self._pointer, family.encode())\n\n def get_family(self):\n ret = pango.pango_font_description_get_family(self._pointer)\n return _const_char_to_py_str(ret)\n\n def set_absolute_size(self, size):\n pango.pango_font_description_set_absolute_size(self._pointer, size)\n\n def set_size(self, size):\n pango.pango_font_description_set_size(self._pointer, size)\n\n def get_size(self, size):\n return 
pango.pango_font_description_get_size(self._pointer, size)\n\n\ndef parse_markup(value, accel_marker=0):\n attr_list = ffi.new(\"PangoAttrList**\")\n text = ffi.new(\"char**\")\n error = ffi.new(\"GError**\")\n if six.PY3:\n value = value.encode()\n\n ret = pango.pango_parse_markup(value, -1, accel_marker, attr_list, text, ffi.NULL, error)\n\n if ret == 0:\n raise Exception(\"parse_markup() failed for %s\" % value)\n\n return attr_list[0], ffi.string(text[0]), six.unichr(accel_marker)\n\n\ndef markup_escape_text(text):\n ret = gobject.g_markup_escape_text(text.encode(), -1)\n if six.PY3:\n return ffi.string(ret).decode()\n return ffi.string(ret)\n", "path": "libqtile/pangocffi.py"}]} | 2,831 | 194 |
gh_patches_debug_20653 | rasdani/github-patches | git_diff | ipython__ipython-6338 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement check_pid for .NET
This is a reminder - I added an `IPython.utils.process.check_pid` function to check if a process exists with a given PID. However, we have a `_process_cli` module containing .NET implementations of these things, and I didn't write the check_pid implementation for .NET.
</issue>
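For background, the .NET route to this check is `System.Diagnostics.Process.GetProcessById`, which raises when no process with that id is running. A minimal IronPython-style sketch, assuming the CLR imports used elsewhere in `_process_cli`; the exact exception handling in the final implementation may differ:

```
import clr  # loads the CLR bridge, as the existing _process_cli module does
import System

def check_pid(pid):
    """Return True if a process with the given pid exists."""
    try:
        System.Diagnostics.Process.GetProcessById(pid)
        return True
    except System.ArgumentException:
        # Raised when no running process has this id
        return False
```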
<code>
[start of IPython/utils/process.py]
1 # encoding: utf-8
2 """
3 Utilities for working with external processes.
4 """
5
6 #-----------------------------------------------------------------------------
7 # Copyright (C) 2008-2011 The IPython Development Team
8 #
9 # Distributed under the terms of the BSD License. The full license is in
10 # the file COPYING, distributed as part of this software.
11 #-----------------------------------------------------------------------------
12
13 #-----------------------------------------------------------------------------
14 # Imports
15 #-----------------------------------------------------------------------------
16 from __future__ import print_function
17
18 # Stdlib
19 import os
20 import sys
21
22 # Our own
23 if sys.platform == 'win32':
24 from ._process_win32 import _find_cmd, system, getoutput, arg_split, check_pid
25 elif sys.platform == 'cli':
26 from ._process_cli import _find_cmd, system, getoutput, arg_split
27 else:
28 from ._process_posix import _find_cmd, system, getoutput, arg_split, check_pid
29
30 from ._process_common import getoutputerror, get_output_error_code, process_handler
31 from . import py3compat
32
33 #-----------------------------------------------------------------------------
34 # Code
35 #-----------------------------------------------------------------------------
36
37
38 class FindCmdError(Exception):
39 pass
40
41
42 def find_cmd(cmd):
43 """Find absolute path to executable cmd in a cross platform manner.
44
45 This function tries to determine the full path to a command line program
46 using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the
47 time it will use the version that is first on the users `PATH`.
48
49 Warning, don't use this to find IPython command line programs as there
50 is a risk you will find the wrong one. Instead find those using the
51 following code and looking for the application itself::
52
53 from IPython.utils.path import get_ipython_module_path
54 from IPython.utils.process import pycmd2argv
55 argv = pycmd2argv(get_ipython_module_path('IPython.terminal.ipapp'))
56
57 Parameters
58 ----------
59 cmd : str
60 The command line program to look for.
61 """
62 try:
63 path = _find_cmd(cmd).rstrip()
64 except OSError:
65 raise FindCmdError('command could not be found: %s' % cmd)
66 # which returns empty if not found
67 if path == '':
68 raise FindCmdError('command could not be found: %s' % cmd)
69 return os.path.abspath(path)
70
71
72 def is_cmd_found(cmd):
73 """Check whether executable `cmd` exists or not and return a bool."""
74 try:
75 find_cmd(cmd)
76 return True
77 except FindCmdError:
78 return False
79
80
81 def pycmd2argv(cmd):
82 r"""Take the path of a python command and return a list (argv-style).
83
84 This only works on Python based command line programs and will find the
85 location of the ``python`` executable using ``sys.executable`` to make
86 sure the right version is used.
87
88 For a given path ``cmd``, this returns [cmd] if cmd's extension is .exe,
89 .com or .bat, and [, cmd] otherwise.
90
91 Parameters
92 ----------
93 cmd : string
94 The path of the command.
95
96 Returns
97 -------
98 argv-style list.
99 """
100 ext = os.path.splitext(cmd)[1]
101 if ext in ['.exe', '.com', '.bat']:
102 return [cmd]
103 else:
104 return [sys.executable, cmd]
105
106
107 def abbrev_cwd():
108 """ Return abbreviated version of cwd, e.g. d:mydir """
109 cwd = py3compat.getcwd().replace('\\','/')
110 drivepart = ''
111 tail = cwd
112 if sys.platform == 'win32':
113 if len(cwd) < 4:
114 return cwd
115 drivepart,tail = os.path.splitdrive(cwd)
116
117
118 parts = tail.split('/')
119 if len(parts) > 2:
120 tail = '/'.join(parts[-2:])
121
122 return (drivepart + (
123 cwd == '/' and '/' or tail))
124
[end of IPython/utils/process.py]
[start of IPython/utils/_process_cli.py]
1 """cli-specific implementation of process utilities.
2
3 cli - Common Language Infrastructure for IronPython. Code
4 can run on any operating system. Check os.name for os-
5 specific settings.
6
7 This file is only meant to be imported by process.py, not by end-users.
8
9 This file is largely untested. To become a full drop-in process
10 interface for IronPython will probably require you to help fill
11 in the details.
12 """
13
14 # Import cli libraries:
15 import clr
16 import System
17
18 # Import Python libraries:
19 import os
20
21 # Import IPython libraries:
22 from IPython.utils import py3compat
23 from ._process_common import arg_split
24
25 def _find_cmd(cmd):
26 """Find the full path to a command using which."""
27 paths = System.Environment.GetEnvironmentVariable("PATH").Split(os.pathsep)
28 for path in paths:
29 filename = os.path.join(path, cmd)
30 if System.IO.File.Exists(filename):
31 return py3compat.bytes_to_str(filename)
32 raise OSError("command %r not found" % cmd)
33
34 def system(cmd):
35 """
36 system(cmd) should work in a cli environment on Mac OSX, Linux,
37 and Windows
38 """
39 psi = System.Diagnostics.ProcessStartInfo(cmd)
40 psi.RedirectStandardOutput = True
41 psi.RedirectStandardError = True
42 psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal
43 psi.UseShellExecute = False
44 # Start up process:
45 reg = System.Diagnostics.Process.Start(psi)
46
47 def getoutput(cmd):
48 """
49 getoutput(cmd) should work in a cli environment on Mac OSX, Linux,
50 and Windows
51 """
52 psi = System.Diagnostics.ProcessStartInfo(cmd)
53 psi.RedirectStandardOutput = True
54 psi.RedirectStandardError = True
55 psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal
56 psi.UseShellExecute = False
57 # Start up process:
58 reg = System.Diagnostics.Process.Start(psi)
59 myOutput = reg.StandardOutput
60 output = myOutput.ReadToEnd()
61 myError = reg.StandardError
62 error = myError.ReadToEnd()
63 return output
64
[end of IPython/utils/_process_cli.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/utils/_process_cli.py b/IPython/utils/_process_cli.py
--- a/IPython/utils/_process_cli.py
+++ b/IPython/utils/_process_cli.py
@@ -61,3 +61,18 @@
myError = reg.StandardError
error = myError.ReadToEnd()
return output
+
+def check_pid(pid):
+ """
+ Check if a process with the given PID (pid) exists
+ """
+ try:
+ System.Diagnostics.Process.GetProcessById(pid)
+ # process with given pid is running
+ return True
+ except System.InvalidOperationException:
+ # process wasn't started by this object (but is running)
+ return True
+ except System.ArgumentException:
+ # process with given pid isn't running
+ return False
diff --git a/IPython/utils/process.py b/IPython/utils/process.py
--- a/IPython/utils/process.py
+++ b/IPython/utils/process.py
@@ -23,7 +23,7 @@
if sys.platform == 'win32':
from ._process_win32 import _find_cmd, system, getoutput, arg_split, check_pid
elif sys.platform == 'cli':
- from ._process_cli import _find_cmd, system, getoutput, arg_split
+ from ._process_cli import _find_cmd, system, getoutput, arg_split, check_pid
else:
from ._process_posix import _find_cmd, system, getoutput, arg_split, check_pid
| {"golden_diff": "diff --git a/IPython/utils/_process_cli.py b/IPython/utils/_process_cli.py\n--- a/IPython/utils/_process_cli.py\n+++ b/IPython/utils/_process_cli.py\n@@ -61,3 +61,18 @@\n myError = reg.StandardError\n error = myError.ReadToEnd()\n return output\n+\n+def check_pid(pid):\n+ \"\"\"\n+ Check if a process with the given PID (pid) exists\n+ \"\"\"\n+ try:\n+ System.Diagnostics.Process.GetProcessById(pid)\n+ # process with given pid is running\n+ return True\n+ except System.InvalidOperationException:\n+ # process wasn't started by this object (but is running)\n+ return True\n+ except System.ArgumentException:\n+ # process with given pid isn't running\n+ return False \ndiff --git a/IPython/utils/process.py b/IPython/utils/process.py\n--- a/IPython/utils/process.py\n+++ b/IPython/utils/process.py\n@@ -23,7 +23,7 @@\n if sys.platform == 'win32':\n from ._process_win32 import _find_cmd, system, getoutput, arg_split, check_pid\n elif sys.platform == 'cli':\n- from ._process_cli import _find_cmd, system, getoutput, arg_split\n+ from ._process_cli import _find_cmd, system, getoutput, arg_split, check_pid\n else:\n from ._process_posix import _find_cmd, system, getoutput, arg_split, check_pid\n", "issue": "Implement check_pid for .NET\nThis is a reminder - I added an `IPython.utils.process.check_pid` function to check if a process exists with a given PID. However, we have a `_process_cli` module containing .NET implementations of these things, and I didn't write the check_pid implementation for .NET.\n\n", "before_files": [{"content": "# encoding: utf-8\n\"\"\"\nUtilities for working with external processes.\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\nfrom __future__ import print_function\n\n# Stdlib\nimport os\nimport sys\n\n# Our own\nif sys.platform == 'win32':\n from ._process_win32 import _find_cmd, system, getoutput, arg_split, check_pid\nelif sys.platform == 'cli':\n from ._process_cli import _find_cmd, system, getoutput, arg_split\nelse:\n from ._process_posix import _find_cmd, system, getoutput, arg_split, check_pid\n\nfrom ._process_common import getoutputerror, get_output_error_code, process_handler\nfrom . import py3compat\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n\n\nclass FindCmdError(Exception):\n pass\n\n\ndef find_cmd(cmd):\n \"\"\"Find absolute path to executable cmd in a cross platform manner.\n\n This function tries to determine the full path to a command line program\n using `which` on Unix/Linux/OS X and `win32api` on Windows. Most of the\n time it will use the version that is first on the users `PATH`.\n\n Warning, don't use this to find IPython command line programs as there\n is a risk you will find the wrong one. 
Instead find those using the\n following code and looking for the application itself::\n\n from IPython.utils.path import get_ipython_module_path\n from IPython.utils.process import pycmd2argv\n argv = pycmd2argv(get_ipython_module_path('IPython.terminal.ipapp'))\n\n Parameters\n ----------\n cmd : str\n The command line program to look for.\n \"\"\"\n try:\n path = _find_cmd(cmd).rstrip()\n except OSError:\n raise FindCmdError('command could not be found: %s' % cmd)\n # which returns empty if not found\n if path == '':\n raise FindCmdError('command could not be found: %s' % cmd)\n return os.path.abspath(path)\n\n\ndef is_cmd_found(cmd):\n \"\"\"Check whether executable `cmd` exists or not and return a bool.\"\"\"\n try:\n find_cmd(cmd)\n return True\n except FindCmdError:\n return False\n\n\ndef pycmd2argv(cmd):\n r\"\"\"Take the path of a python command and return a list (argv-style).\n\n This only works on Python based command line programs and will find the\n location of the ``python`` executable using ``sys.executable`` to make\n sure the right version is used.\n\n For a given path ``cmd``, this returns [cmd] if cmd's extension is .exe,\n .com or .bat, and [, cmd] otherwise.\n\n Parameters\n ----------\n cmd : string\n The path of the command.\n\n Returns\n -------\n argv-style list.\n \"\"\"\n ext = os.path.splitext(cmd)[1]\n if ext in ['.exe', '.com', '.bat']:\n return [cmd]\n else:\n return [sys.executable, cmd]\n\n\ndef abbrev_cwd():\n \"\"\" Return abbreviated version of cwd, e.g. d:mydir \"\"\"\n cwd = py3compat.getcwd().replace('\\\\','/')\n drivepart = ''\n tail = cwd\n if sys.platform == 'win32':\n if len(cwd) < 4:\n return cwd\n drivepart,tail = os.path.splitdrive(cwd)\n\n\n parts = tail.split('/')\n if len(parts) > 2:\n tail = '/'.join(parts[-2:])\n\n return (drivepart + (\n cwd == '/' and '/' or tail))\n", "path": "IPython/utils/process.py"}, {"content": "\"\"\"cli-specific implementation of process utilities.\n\ncli - Common Language Infrastructure for IronPython. Code\n can run on any operating system. Check os.name for os-\n specific settings.\n\nThis file is only meant to be imported by process.py, not by end-users.\n\nThis file is largely untested. To become a full drop-in process\ninterface for IronPython will probably require you to help fill\nin the details. 
\n\"\"\"\n\n# Import cli libraries:\nimport clr\nimport System\n\n# Import Python libraries:\nimport os\n\n# Import IPython libraries:\nfrom IPython.utils import py3compat\nfrom ._process_common import arg_split\n\ndef _find_cmd(cmd):\n \"\"\"Find the full path to a command using which.\"\"\"\n paths = System.Environment.GetEnvironmentVariable(\"PATH\").Split(os.pathsep)\n for path in paths:\n filename = os.path.join(path, cmd)\n if System.IO.File.Exists(filename):\n return py3compat.bytes_to_str(filename)\n raise OSError(\"command %r not found\" % cmd)\n\ndef system(cmd):\n \"\"\"\n system(cmd) should work in a cli environment on Mac OSX, Linux,\n and Windows\n \"\"\"\n psi = System.Diagnostics.ProcessStartInfo(cmd)\n psi.RedirectStandardOutput = True\n psi.RedirectStandardError = True\n psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal\n psi.UseShellExecute = False\n # Start up process:\n reg = System.Diagnostics.Process.Start(psi)\n\ndef getoutput(cmd):\n \"\"\"\n getoutput(cmd) should work in a cli environment on Mac OSX, Linux,\n and Windows\n \"\"\"\n psi = System.Diagnostics.ProcessStartInfo(cmd)\n psi.RedirectStandardOutput = True\n psi.RedirectStandardError = True\n psi.WindowStyle = System.Diagnostics.ProcessWindowStyle.Normal\n psi.UseShellExecute = False\n # Start up process:\n reg = System.Diagnostics.Process.Start(psi)\n myOutput = reg.StandardOutput\n output = myOutput.ReadToEnd()\n myError = reg.StandardError\n error = myError.ReadToEnd()\n return output\n", "path": "IPython/utils/_process_cli.py"}]} | 2,295 | 327 |
gh_patches_debug_39622 | rasdani/github-patches | git_diff | openai__gym-2158 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Base Env class should be abstract
Hi, thank you for this lib,
The Env class should be abstract but it is not. Instead its methods have a `raise NotImplementedError` body.
There are advantages to making this abstract: for example PyLance can properly analyze gym code.
I think Env should subclass `ABC` and methods should be annotated as `@abstractmethod`.
</issue>
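As a hedged sketch of what the proposal means in practice (an illustration of the `ABC`/`@abstractmethod` pattern, not the actual gym source; the eventual patch could equally keep the current base class and only decorate the methods):

```
from abc import ABC, abstractmethod

class Env(ABC):
    @abstractmethod
    def step(self, action):
        """Run one timestep; return (observation, reward, done, info)."""

    @abstractmethod
    def reset(self):
        """Reset the environment and return the initial observation."""

    @abstractmethod
    def render(self, mode="human"):
        """Render the environment."""

# A subclass that forgets to implement any of these now fails at
# construction time, and static analyzers can flag it at edit time.
```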
<code>
[start of gym/core.py]
1 import gym
2 from gym import error
3 from gym.utils import closer
4
5 env_closer = closer.Closer()
6
7
8 class Env(object):
9 """The main OpenAI Gym class. It encapsulates an environment with
10 arbitrary behind-the-scenes dynamics. An environment can be
11 partially or fully observed.
12
13 The main API methods that users of this class need to know are:
14
15 step
16 reset
17 render
18 close
19 seed
20
21 And set the following attributes:
22
23 action_space: The Space object corresponding to valid actions
24 observation_space: The Space object corresponding to valid observations
25 reward_range: A tuple corresponding to the min and max possible rewards
26
27 Note: a default reward range set to [-inf,+inf] already exists. Set it if you want a narrower range.
28
29 The methods are accessed publicly as "step", "reset", etc...
30 """
31
32 # Set this in SOME subclasses
33 metadata = {"render.modes": []}
34 reward_range = (-float("inf"), float("inf"))
35 spec = None
36
37 # Set these in ALL subclasses
38 action_space = None
39 observation_space = None
40
41 def step(self, action):
42 """Run one timestep of the environment's dynamics. When end of
43 episode is reached, you are responsible for calling `reset()`
44 to reset this environment's state.
45
46 Accepts an action and returns a tuple (observation, reward, done, info).
47
48 Args:
49 action (object): an action provided by the agent
50
51 Returns:
52 observation (object): agent's observation of the current environment
53 reward (float) : amount of reward returned after previous action
54 done (bool): whether the episode has ended, in which case further step() calls will return undefined results
55 info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
56 """
57 raise NotImplementedError
58
59 def reset(self):
60 """Resets the environment to an initial state and returns an initial
61 observation.
62
63 Note that this function should not reset the environment's random
64 number generator(s); random variables in the environment's state should
65 be sampled independently between multiple calls to `reset()`. In other
66 words, each call of `reset()` should yield an environment suitable for
67 a new episode, independent of previous episodes.
68
69 Returns:
70 observation (object): the initial observation.
71 """
72 raise NotImplementedError
73
74 def render(self, mode="human"):
75 """Renders the environment.
76
77 The set of supported modes varies per environment. (And some
78 environments do not support rendering at all.) By convention,
79 if mode is:
80
81 - human: render to the current display or terminal and
82 return nothing. Usually for human consumption.
83 - rgb_array: Return an numpy.ndarray with shape (x, y, 3),
84 representing RGB values for an x-by-y pixel image, suitable
85 for turning into a video.
86 - ansi: Return a string (str) or StringIO.StringIO containing a
87 terminal-style text representation. The text can include newlines
88 and ANSI escape sequences (e.g. for colors).
89
90 Note:
91 Make sure that your class's metadata 'render.modes' key includes
92 the list of supported modes. It's recommended to call super()
93 in implementations to use the functionality of this method.
94
95 Args:
96 mode (str): the mode to render with
97
98 Example:
99
100 class MyEnv(Env):
101 metadata = {'render.modes': ['human', 'rgb_array']}
102
103 def render(self, mode='human'):
104 if mode == 'rgb_array':
105 return np.array(...) # return RGB frame suitable for video
106 elif mode == 'human':
107 ... # pop up a window and render
108 else:
109 super(MyEnv, self).render(mode=mode) # just raise an exception
110 """
111 raise NotImplementedError
112
113 def close(self):
114 """Override close in your subclass to perform any necessary cleanup.
115
116 Environments will automatically close() themselves when
117 garbage collected or when the program exits.
118 """
119 pass
120
121 def seed(self, seed=None):
122 """Sets the seed for this env's random number generator(s).
123
124 Note:
125 Some environments use multiple pseudorandom number generators.
126 We want to capture all such seeds used in order to ensure that
127 there aren't accidental correlations between multiple generators.
128
129 Returns:
130 list<bigint>: Returns the list of seeds used in this env's random
131 number generators. The first value in the list should be the
132 "main" seed, or the value which a reproducer should pass to
133 'seed'. Often, the main seed equals the provided 'seed', but
134 this won't be true if seed=None, for example.
135 """
136 return
137
138 @property
139 def unwrapped(self):
140 """Completely unwrap this env.
141
142 Returns:
143 gym.Env: The base non-wrapped gym.Env instance
144 """
145 return self
146
147 def __str__(self):
148 if self.spec is None:
149 return "<{} instance>".format(type(self).__name__)
150 else:
151 return "<{}<{}>>".format(type(self).__name__, self.spec.id)
152
153 def __enter__(self):
154 """Support with-statement for the environment."""
155 return self
156
157 def __exit__(self, *args):
158 """Support with-statement for the environment."""
159 self.close()
160 # propagate exception
161 return False
162
163
164 class GoalEnv(Env):
165 """A goal-based environment. It functions just as any regular OpenAI Gym environment but it
166 imposes a required structure on the observation_space. More concretely, the observation
167 space is required to contain at least three elements, namely `observation`, `desired_goal`, and
168 `achieved_goal`. Here, `desired_goal` specifies the goal that the agent should attempt to achieve.
169 `achieved_goal` is the goal that it currently achieved instead. `observation` contains the
170 actual observations of the environment as per usual.
171 """
172
173 def reset(self):
174 # Enforce that each GoalEnv uses a Goal-compatible observation space.
175 if not isinstance(self.observation_space, gym.spaces.Dict):
176 raise error.Error(
177 "GoalEnv requires an observation space of type gym.spaces.Dict"
178 )
179 for key in ["observation", "achieved_goal", "desired_goal"]:
180 if key not in self.observation_space.spaces:
181 raise error.Error(
182 'GoalEnv requires the "{}" key to be part of the observation dictionary.'.format(
183 key
184 )
185 )
186
187 def compute_reward(self, achieved_goal, desired_goal, info):
188 """Compute the step reward. This externalizes the reward function and makes
189 it dependent on a desired goal and the one that was achieved. If you wish to include
190 additional rewards that are independent of the goal, you can include the necessary values
191 to derive it in 'info' and compute it accordingly.
192
193 Args:
194 achieved_goal (object): the goal that was achieved during execution
195 desired_goal (object): the desired goal that we asked the agent to attempt to achieve
196 info (dict): an info dictionary with additional information
197
198 Returns:
199 float: The reward that corresponds to the provided achieved goal w.r.t. to the desired
200 goal. Note that the following should always hold true:
201
202 ob, reward, done, info = env.step()
203 assert reward == env.compute_reward(ob['achieved_goal'], ob['goal'], info)
204 """
205 raise NotImplementedError
206
207
208 class Wrapper(Env):
209 """Wraps the environment to allow a modular transformation.
210
211 This class is the base class for all wrappers. The subclass could override
212 some methods to change the behavior of the original environment without touching the
213 original code.
214
215 .. note::
216
217 Don't forget to call ``super().__init__(env)`` if the subclass overrides :meth:`__init__`.
218
219 """
220
221 def __init__(self, env):
222 self.env = env
223 self.action_space = self.env.action_space
224 self.observation_space = self.env.observation_space
225 self.reward_range = self.env.reward_range
226 self.metadata = self.env.metadata
227
228 def __getattr__(self, name):
229 if name.startswith("_"):
230 raise AttributeError(
231 "attempted to get missing private attribute '{}'".format(name)
232 )
233 return getattr(self.env, name)
234
235 @property
236 def spec(self):
237 return self.env.spec
238
239 @classmethod
240 def class_name(cls):
241 return cls.__name__
242
243 def step(self, action):
244 return self.env.step(action)
245
246 def reset(self, **kwargs):
247 return self.env.reset(**kwargs)
248
249 def render(self, mode="human", **kwargs):
250 return self.env.render(mode, **kwargs)
251
252 def close(self):
253 return self.env.close()
254
255 def seed(self, seed=None):
256 return self.env.seed(seed)
257
258 def compute_reward(self, achieved_goal, desired_goal, info):
259 return self.env.compute_reward(achieved_goal, desired_goal, info)
260
261 def __str__(self):
262 return "<{}{}>".format(type(self).__name__, self.env)
263
264 def __repr__(self):
265 return str(self)
266
267 @property
268 def unwrapped(self):
269 return self.env.unwrapped
270
271
272 class ObservationWrapper(Wrapper):
273 def reset(self, **kwargs):
274 observation = self.env.reset(**kwargs)
275 return self.observation(observation)
276
277 def step(self, action):
278 observation, reward, done, info = self.env.step(action)
279 return self.observation(observation), reward, done, info
280
281 def observation(self, observation):
282 raise NotImplementedError
283
284
285 class RewardWrapper(Wrapper):
286 def reset(self, **kwargs):
287 return self.env.reset(**kwargs)
288
289 def step(self, action):
290 observation, reward, done, info = self.env.step(action)
291 return observation, self.reward(reward), done, info
292
293 def reward(self, reward):
294 raise NotImplementedError
295
296
297 class ActionWrapper(Wrapper):
298 def reset(self, **kwargs):
299 return self.env.reset(**kwargs)
300
301 def step(self, action):
302 return self.env.step(self.action(action))
303
304 def action(self, action):
305 raise NotImplementedError
306
307 def reverse_action(self, action):
308 raise NotImplementedError
309
[end of gym/core.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gym/core.py b/gym/core.py
--- a/gym/core.py
+++ b/gym/core.py
@@ -1,3 +1,5 @@
+from abc import abstractmethod
+
import gym
from gym import error
from gym.utils import closer
@@ -38,6 +40,7 @@
action_space = None
observation_space = None
+ @abstractmethod
def step(self, action):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, you are responsible for calling `reset()`
@@ -56,6 +59,7 @@
"""
raise NotImplementedError
+ @abstractmethod
def reset(self):
"""Resets the environment to an initial state and returns an initial
observation.
@@ -71,7 +75,8 @@
"""
raise NotImplementedError
- def render(self, mode="human"):
+ @abstractmethod
+ def render(self, mode='human'):
"""Renders the environment.
The set of supported modes varies per environment. (And some
@@ -184,6 +189,7 @@
)
)
+ @abstractmethod
def compute_reward(self, achieved_goal, desired_goal, info):
"""Compute the step reward. This externalizes the reward function and makes
it dependent on a desired goal and the one that was achieved. If you wish to include
@@ -278,6 +284,7 @@
observation, reward, done, info = self.env.step(action)
return self.observation(observation), reward, done, info
+ @abstractmethod
def observation(self, observation):
raise NotImplementedError
@@ -290,6 +297,7 @@
observation, reward, done, info = self.env.step(action)
return observation, self.reward(reward), done, info
+ @abstractmethod
def reward(self, reward):
raise NotImplementedError
@@ -301,8 +309,10 @@
def step(self, action):
return self.env.step(self.action(action))
+ @abstractmethod
def action(self, action):
raise NotImplementedError
+ @abstractmethod
def reverse_action(self, action):
raise NotImplementedError
| {"golden_diff": "diff --git a/gym/core.py b/gym/core.py\n--- a/gym/core.py\n+++ b/gym/core.py\n@@ -1,3 +1,5 @@\n+from abc import abstractmethod\n+\n import gym\n from gym import error\n from gym.utils import closer\n@@ -38,6 +40,7 @@\n action_space = None\n observation_space = None\n \n+ @abstractmethod\n def step(self, action):\n \"\"\"Run one timestep of the environment's dynamics. When end of\n episode is reached, you are responsible for calling `reset()`\n@@ -56,6 +59,7 @@\n \"\"\"\n raise NotImplementedError\n \n+ @abstractmethod\n def reset(self):\n \"\"\"Resets the environment to an initial state and returns an initial\n observation.\n@@ -71,7 +75,8 @@\n \"\"\"\n raise NotImplementedError\n \n- def render(self, mode=\"human\"):\n+ @abstractmethod\n+ def render(self, mode='human'):\n \"\"\"Renders the environment.\n \n The set of supported modes varies per environment. (And some\n@@ -184,6 +189,7 @@\n )\n )\n \n+ @abstractmethod\n def compute_reward(self, achieved_goal, desired_goal, info):\n \"\"\"Compute the step reward. This externalizes the reward function and makes\n it dependent on a desired goal and the one that was achieved. If you wish to include\n@@ -278,6 +284,7 @@\n observation, reward, done, info = self.env.step(action)\n return self.observation(observation), reward, done, info\n \n+ @abstractmethod\n def observation(self, observation):\n raise NotImplementedError\n \n@@ -290,6 +297,7 @@\n observation, reward, done, info = self.env.step(action)\n return observation, self.reward(reward), done, info\n \n+ @abstractmethod\n def reward(self, reward):\n raise NotImplementedError\n \n@@ -301,8 +309,10 @@\n def step(self, action):\n return self.env.step(self.action(action))\n \n+ @abstractmethod\n def action(self, action):\n raise NotImplementedError\n \n+ @abstractmethod\n def reverse_action(self, action):\n raise NotImplementedError\n", "issue": "Base Env class should be abstract\nHi, thank you for this lib,\r\nThe Env class should be abstract but it is not. Instead its methods have a `raise NotImplementedError` body.\r\nThere are advantages to making this abstract: for example PyLance can properly analyze gym code.\r\n\r\nI think Env should subclass `ABC` and methods should be annotated as `@abstractmethod`.\n", "before_files": [{"content": "import gym\nfrom gym import error\nfrom gym.utils import closer\n\nenv_closer = closer.Closer()\n\n\nclass Env(object):\n \"\"\"The main OpenAI Gym class. It encapsulates an environment with\n arbitrary behind-the-scenes dynamics. An environment can be\n partially or fully observed.\n\n The main API methods that users of this class need to know are:\n\n step\n reset\n render\n close\n seed\n\n And set the following attributes:\n\n action_space: The Space object corresponding to valid actions\n observation_space: The Space object corresponding to valid observations\n reward_range: A tuple corresponding to the min and max possible rewards\n\n Note: a default reward range set to [-inf,+inf] already exists. Set it if you want a narrower range.\n\n The methods are accessed publicly as \"step\", \"reset\", etc...\n \"\"\"\n\n # Set this in SOME subclasses\n metadata = {\"render.modes\": []}\n reward_range = (-float(\"inf\"), float(\"inf\"))\n spec = None\n\n # Set these in ALL subclasses\n action_space = None\n observation_space = None\n\n def step(self, action):\n \"\"\"Run one timestep of the environment's dynamics. 
When end of\n episode is reached, you are responsible for calling `reset()`\n to reset this environment's state.\n\n Accepts an action and returns a tuple (observation, reward, done, info).\n\n Args:\n action (object): an action provided by the agent\n\n Returns:\n observation (object): agent's observation of the current environment\n reward (float) : amount of reward returned after previous action\n done (bool): whether the episode has ended, in which case further step() calls will return undefined results\n info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)\n \"\"\"\n raise NotImplementedError\n\n def reset(self):\n \"\"\"Resets the environment to an initial state and returns an initial\n observation.\n\n Note that this function should not reset the environment's random\n number generator(s); random variables in the environment's state should\n be sampled independently between multiple calls to `reset()`. In other\n words, each call of `reset()` should yield an environment suitable for\n a new episode, independent of previous episodes.\n\n Returns:\n observation (object): the initial observation.\n \"\"\"\n raise NotImplementedError\n\n def render(self, mode=\"human\"):\n \"\"\"Renders the environment.\n\n The set of supported modes varies per environment. (And some\n environments do not support rendering at all.) By convention,\n if mode is:\n\n - human: render to the current display or terminal and\n return nothing. Usually for human consumption.\n - rgb_array: Return an numpy.ndarray with shape (x, y, 3),\n representing RGB values for an x-by-y pixel image, suitable\n for turning into a video.\n - ansi: Return a string (str) or StringIO.StringIO containing a\n terminal-style text representation. The text can include newlines\n and ANSI escape sequences (e.g. for colors).\n\n Note:\n Make sure that your class's metadata 'render.modes' key includes\n the list of supported modes. It's recommended to call super()\n in implementations to use the functionality of this method.\n\n Args:\n mode (str): the mode to render with\n\n Example:\n\n class MyEnv(Env):\n metadata = {'render.modes': ['human', 'rgb_array']}\n\n def render(self, mode='human'):\n if mode == 'rgb_array':\n return np.array(...) # return RGB frame suitable for video\n elif mode == 'human':\n ... # pop up a window and render\n else:\n super(MyEnv, self).render(mode=mode) # just raise an exception\n \"\"\"\n raise NotImplementedError\n\n def close(self):\n \"\"\"Override close in your subclass to perform any necessary cleanup.\n\n Environments will automatically close() themselves when\n garbage collected or when the program exits.\n \"\"\"\n pass\n\n def seed(self, seed=None):\n \"\"\"Sets the seed for this env's random number generator(s).\n\n Note:\n Some environments use multiple pseudorandom number generators.\n We want to capture all such seeds used in order to ensure that\n there aren't accidental correlations between multiple generators.\n\n Returns:\n list<bigint>: Returns the list of seeds used in this env's random\n number generators. The first value in the list should be the\n \"main\" seed, or the value which a reproducer should pass to\n 'seed'. 
Often, the main seed equals the provided 'seed', but\n this won't be true if seed=None, for example.\n \"\"\"\n return\n\n @property\n def unwrapped(self):\n \"\"\"Completely unwrap this env.\n\n Returns:\n gym.Env: The base non-wrapped gym.Env instance\n \"\"\"\n return self\n\n def __str__(self):\n if self.spec is None:\n return \"<{} instance>\".format(type(self).__name__)\n else:\n return \"<{}<{}>>\".format(type(self).__name__, self.spec.id)\n\n def __enter__(self):\n \"\"\"Support with-statement for the environment.\"\"\"\n return self\n\n def __exit__(self, *args):\n \"\"\"Support with-statement for the environment.\"\"\"\n self.close()\n # propagate exception\n return False\n\n\nclass GoalEnv(Env):\n \"\"\"A goal-based environment. It functions just as any regular OpenAI Gym environment but it\n imposes a required structure on the observation_space. More concretely, the observation\n space is required to contain at least three elements, namely `observation`, `desired_goal`, and\n `achieved_goal`. Here, `desired_goal` specifies the goal that the agent should attempt to achieve.\n `achieved_goal` is the goal that it currently achieved instead. `observation` contains the\n actual observations of the environment as per usual.\n \"\"\"\n\n def reset(self):\n # Enforce that each GoalEnv uses a Goal-compatible observation space.\n if not isinstance(self.observation_space, gym.spaces.Dict):\n raise error.Error(\n \"GoalEnv requires an observation space of type gym.spaces.Dict\"\n )\n for key in [\"observation\", \"achieved_goal\", \"desired_goal\"]:\n if key not in self.observation_space.spaces:\n raise error.Error(\n 'GoalEnv requires the \"{}\" key to be part of the observation dictionary.'.format(\n key\n )\n )\n\n def compute_reward(self, achieved_goal, desired_goal, info):\n \"\"\"Compute the step reward. This externalizes the reward function and makes\n it dependent on a desired goal and the one that was achieved. If you wish to include\n additional rewards that are independent of the goal, you can include the necessary values\n to derive it in 'info' and compute it accordingly.\n\n Args:\n achieved_goal (object): the goal that was achieved during execution\n desired_goal (object): the desired goal that we asked the agent to attempt to achieve\n info (dict): an info dictionary with additional information\n\n Returns:\n float: The reward that corresponds to the provided achieved goal w.r.t. to the desired\n goal. Note that the following should always hold true:\n\n ob, reward, done, info = env.step()\n assert reward == env.compute_reward(ob['achieved_goal'], ob['goal'], info)\n \"\"\"\n raise NotImplementedError\n\n\nclass Wrapper(Env):\n \"\"\"Wraps the environment to allow a modular transformation.\n\n This class is the base class for all wrappers. The subclass could override\n some methods to change the behavior of the original environment without touching the\n original code.\n\n .. 
note::\n\n Don't forget to call ``super().__init__(env)`` if the subclass overrides :meth:`__init__`.\n\n \"\"\"\n\n def __init__(self, env):\n self.env = env\n self.action_space = self.env.action_space\n self.observation_space = self.env.observation_space\n self.reward_range = self.env.reward_range\n self.metadata = self.env.metadata\n\n def __getattr__(self, name):\n if name.startswith(\"_\"):\n raise AttributeError(\n \"attempted to get missing private attribute '{}'\".format(name)\n )\n return getattr(self.env, name)\n\n @property\n def spec(self):\n return self.env.spec\n\n @classmethod\n def class_name(cls):\n return cls.__name__\n\n def step(self, action):\n return self.env.step(action)\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n def render(self, mode=\"human\", **kwargs):\n return self.env.render(mode, **kwargs)\n\n def close(self):\n return self.env.close()\n\n def seed(self, seed=None):\n return self.env.seed(seed)\n\n def compute_reward(self, achieved_goal, desired_goal, info):\n return self.env.compute_reward(achieved_goal, desired_goal, info)\n\n def __str__(self):\n return \"<{}{}>\".format(type(self).__name__, self.env)\n\n def __repr__(self):\n return str(self)\n\n @property\n def unwrapped(self):\n return self.env.unwrapped\n\n\nclass ObservationWrapper(Wrapper):\n def reset(self, **kwargs):\n observation = self.env.reset(**kwargs)\n return self.observation(observation)\n\n def step(self, action):\n observation, reward, done, info = self.env.step(action)\n return self.observation(observation), reward, done, info\n\n def observation(self, observation):\n raise NotImplementedError\n\n\nclass RewardWrapper(Wrapper):\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n def step(self, action):\n observation, reward, done, info = self.env.step(action)\n return observation, self.reward(reward), done, info\n\n def reward(self, reward):\n raise NotImplementedError\n\n\nclass ActionWrapper(Wrapper):\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n def step(self, action):\n return self.env.step(self.action(action))\n\n def action(self, action):\n raise NotImplementedError\n\n def reverse_action(self, action):\n raise NotImplementedError\n", "path": "gym/core.py"}]} | 3,649 | 490 |
gh_patches_debug_35929 | rasdani/github-patches | git_diff | litestar-org__litestar-3295 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: Path parameters missing from OpenAPI schema when not included in handler signature
### Description
When defining a path parameter, but not using it in the handler signature, the path parameter is not documented in the OpenAPI schema. This is likely due to how we handle their extraction, which is based on the handler.
This is an issue though because, even if the parameter value is not used in the handler itself, it still has to be supplied in the request path and should therefore be documented.
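For contrast, a minimal sketch (added here for illustration, not part of the original report; the handler name is mine and the calls mirror the MCVE below) of a handler that does declare the parameter in its signature — in that case the parameter is documented as expected, while the failing case is shown in the MCVE below:
```python
from litestar import Litestar, get


@get("/{param:str}")
async def handler_with_param(param: str) -> None:
    ...


app = Litestar([handler_with_param])
# The path parameter is documented because it is consumed by the handler signature.
assert app.openapi_schema.paths["/{param}"].get.parameters
```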
### URL to code causing the issue
_No response_
### MCVE
```python
@get("/{param:str}")
async def handler() -> None:
...
app = Litestar([handler])
assert app.openapi_schema.paths["/{param}"].get.parameters
```
### Steps to reproduce
_No response_
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
2.7.1
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/3290">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/3290/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/3290/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
</issue>
<code>
[start of litestar/_openapi/parameters.py]
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING
4
5 from litestar._openapi.schema_generation import SchemaCreator
6 from litestar._openapi.schema_generation.utils import get_formatted_examples
7 from litestar.constants import RESERVED_KWARGS
8 from litestar.enums import ParamType
9 from litestar.exceptions import ImproperlyConfiguredException
10 from litestar.openapi.spec.parameter import Parameter
11 from litestar.openapi.spec.schema import Schema
12 from litestar.params import DependencyKwarg, ParameterKwarg
13 from litestar.types import Empty
14 from litestar.typing import FieldDefinition
15
16 if TYPE_CHECKING:
17 from litestar._openapi.datastructures import OpenAPIContext
18 from litestar.handlers.base import BaseRouteHandler
19 from litestar.openapi.spec import Reference
20 from litestar.types.internal_types import PathParameterDefinition
21
22 __all__ = ("create_parameters_for_handler",)
23
24
25 class ParameterCollection:
26 """Facilitates conditional deduplication of parameters.
27
28 If multiple parameters with the same name are produced for a handler, the condition is ignored if the two
29 ``Parameter`` instances are the same (the first is retained and any duplicates are ignored). If the ``Parameter``
30 instances are not the same, an exception is raised.
31 """
32
33 def __init__(self, route_handler: BaseRouteHandler) -> None:
34 """Initialize ``ParameterCollection``.
35
36 Args:
37 route_handler: Associated route handler
38 """
39 self.route_handler = route_handler
40 self._parameters: dict[tuple[str, str], Parameter] = {}
41
42 def add(self, parameter: Parameter) -> None:
43 """Add a ``Parameter`` to the collection.
44
45 If an existing parameter with the same name and type already exists, the
46 parameter is ignored.
47
48 If an existing parameter with the same name but different type exists, raises
49 ``ImproperlyConfiguredException``.
50 """
51
52 if (parameter.name, parameter.param_in) not in self._parameters:
53 # because we are defining routes as unique per path, we have to handle here a situation when there is an optional
54 # path parameter. e.g. get(path=["/", "/{param:str}"]). When parsing the parameter for path, the route handler
55 # would still have a kwarg called param:
56 # def handler(param: str | None) -> ...
57 if parameter.param_in != ParamType.QUERY or all(
58 f"{{{parameter.name}:" not in path for path in self.route_handler.paths
59 ):
60 self._parameters[(parameter.name, parameter.param_in)] = parameter
61 return
62
63 pre_existing = self._parameters[(parameter.name, parameter.param_in)]
64 if parameter == pre_existing:
65 return
66
67 raise ImproperlyConfiguredException(
68 f"OpenAPI schema generation for handler `{self.route_handler}` detected multiple parameters named "
69 f"'{parameter.name}' with different types."
70 )
71
72 def list(self) -> list[Parameter]:
73 """Return a list of all ``Parameter``'s in the collection."""
74 return list(self._parameters.values())
75
76
77 class ParameterFactory:
78 """Factory for creating OpenAPI Parameters for a given route handler."""
79
80 def __init__(
81 self,
82 context: OpenAPIContext,
83 route_handler: BaseRouteHandler,
84 path_parameters: tuple[PathParameterDefinition, ...],
85 ) -> None:
86 """Initialize ParameterFactory.
87
88 Args:
89 context: The OpenAPI context.
90 route_handler: The route handler.
91 path_parameters: The path parameters for the route.
92 """
93 self.context = context
94 self.schema_creator = SchemaCreator.from_openapi_context(self.context, prefer_alias=True)
95 self.route_handler = route_handler
96 self.parameters = ParameterCollection(route_handler)
97 self.dependency_providers = route_handler.resolve_dependencies()
98 self.layered_parameters = route_handler.resolve_layered_parameters()
99 self.path_parameters_names = {p.name for p in path_parameters}
100
101 def create_parameter(self, field_definition: FieldDefinition, parameter_name: str) -> Parameter:
102 """Create an OpenAPI Parameter instance for a field definition.
103
104 Args:
105 field_definition: The field definition.
106 parameter_name: The name of the parameter.
107 """
108
109 result: Schema | Reference | None = None
110 kwarg_definition = (
111 field_definition.kwarg_definition if isinstance(field_definition.kwarg_definition, ParameterKwarg) else None
112 )
113
114 if parameter_name in self.path_parameters_names:
115 param_in = ParamType.PATH
116 is_required = True
117 result = self.schema_creator.for_field_definition(field_definition)
118 elif kwarg_definition and kwarg_definition.header:
119 parameter_name = kwarg_definition.header
120 param_in = ParamType.HEADER
121 is_required = field_definition.is_required
122 elif kwarg_definition and kwarg_definition.cookie:
123 parameter_name = kwarg_definition.cookie
124 param_in = ParamType.COOKIE
125 is_required = field_definition.is_required
126 else:
127 is_required = field_definition.is_required
128 param_in = ParamType.QUERY
129 parameter_name = kwarg_definition.query if kwarg_definition and kwarg_definition.query else parameter_name
130
131 if not result:
132 result = self.schema_creator.for_field_definition(field_definition)
133
134 schema = result if isinstance(result, Schema) else self.context.schema_registry.from_reference(result).schema
135
136 examples_list = kwarg_definition.examples or [] if kwarg_definition else []
137 examples = get_formatted_examples(field_definition, examples_list)
138
139 return Parameter(
140 description=schema.description,
141 name=parameter_name,
142 param_in=param_in,
143 required=is_required,
144 schema=result,
145 examples=examples or None,
146 )
147
148 def get_layered_parameter(self, field_name: str, field_definition: FieldDefinition) -> Parameter:
149 """Create a parameter for a field definition that has a KwargDefinition defined on the layers.
150
151 Args:
152 field_name: The name of the field.
153 field_definition: The field definition.
154 """
155 layer_field = self.layered_parameters[field_name]
156
157 field = field_definition if field_definition.is_parameter_field else layer_field
158 default = layer_field.default if field_definition.has_default else field_definition.default
159 annotation = field_definition.annotation if field_definition is not Empty else layer_field.annotation
160
161 parameter_name = field_name
162 if isinstance(field.kwarg_definition, ParameterKwarg):
163 parameter_name = (
164 field.kwarg_definition.query
165 or field.kwarg_definition.header
166 or field.kwarg_definition.cookie
167 or field_name
168 )
169
170 field_definition = FieldDefinition.from_kwarg(
171 inner_types=field.inner_types,
172 default=default,
173 extra=field.extra,
174 annotation=annotation,
175 kwarg_definition=field.kwarg_definition,
176 name=field_name,
177 )
178 return self.create_parameter(field_definition=field_definition, parameter_name=parameter_name)
179
180 def create_parameters_for_field_definitions(self, fields: dict[str, FieldDefinition]) -> None:
181 """Add Parameter models to the handler's collection for the given field definitions.
182
183 Args:
184 fields: The field definitions.
185 """
186 unique_handler_fields = (
187 (k, v) for k, v in fields.items() if k not in RESERVED_KWARGS and k not in self.layered_parameters
188 )
189 unique_layered_fields = (
190 (k, v) for k, v in self.layered_parameters.items() if k not in RESERVED_KWARGS and k not in fields
191 )
192 intersection_fields = (
193 (k, v) for k, v in fields.items() if k not in RESERVED_KWARGS and k in self.layered_parameters
194 )
195
196 for field_name, field_definition in unique_handler_fields:
197 if (
198 isinstance(field_definition.kwarg_definition, DependencyKwarg)
199 and field_name not in self.dependency_providers
200 ):
201 # never document explicit dependencies
202 continue
203
204 if provider := self.dependency_providers.get(field_name):
205 self.create_parameters_for_field_definitions(fields=provider.parsed_fn_signature.parameters)
206 else:
207 self.parameters.add(self.create_parameter(field_definition=field_definition, parameter_name=field_name))
208
209 for field_name, field_definition in unique_layered_fields:
210 self.parameters.add(self.create_parameter(field_definition=field_definition, parameter_name=field_name))
211
212 for field_name, field_definition in intersection_fields:
213 self.parameters.add(self.get_layered_parameter(field_name=field_name, field_definition=field_definition))
214
215 def create_parameters_for_handler(self) -> list[Parameter]:
216 """Create a list of path/query/header Parameter models for the given PathHandler."""
217 handler_fields = self.route_handler.parsed_fn_signature.parameters
218 self.create_parameters_for_field_definitions(handler_fields)
219 return self.parameters.list()
220
221
222 def create_parameters_for_handler(
223 context: OpenAPIContext,
224 route_handler: BaseRouteHandler,
225 path_parameters: tuple[PathParameterDefinition, ...],
226 ) -> list[Parameter]:
227 """Create a list of path/query/header Parameter models for the given PathHandler."""
228 factory = ParameterFactory(
229 context=context,
230 route_handler=route_handler,
231 path_parameters=path_parameters,
232 )
233 return factory.create_parameters_for_handler()
234
[end of litestar/_openapi/parameters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/_openapi/parameters.py b/litestar/_openapi/parameters.py
--- a/litestar/_openapi/parameters.py
+++ b/litestar/_openapi/parameters.py
@@ -96,7 +96,7 @@
self.parameters = ParameterCollection(route_handler)
self.dependency_providers = route_handler.resolve_dependencies()
self.layered_parameters = route_handler.resolve_layered_parameters()
- self.path_parameters_names = {p.name for p in path_parameters}
+ self.path_parameters: dict[str, PathParameterDefinition] = {p.name: p for p in path_parameters}
def create_parameter(self, field_definition: FieldDefinition, parameter_name: str) -> Parameter:
"""Create an OpenAPI Parameter instance for a field definition.
@@ -111,7 +111,7 @@
field_definition.kwarg_definition if isinstance(field_definition.kwarg_definition, ParameterKwarg) else None
)
- if parameter_name in self.path_parameters_names:
+ if parameter_name in self.path_parameters:
param_in = ParamType.PATH
is_required = True
result = self.schema_creator.for_field_definition(field_definition)
@@ -215,6 +215,17 @@
def create_parameters_for_handler(self) -> list[Parameter]:
"""Create a list of path/query/header Parameter models for the given PathHandler."""
handler_fields = self.route_handler.parsed_fn_signature.parameters
+ # not all path parameters have to be consumed by the handler. Because even not
+ # consumed path parameters must still be specified, we create stub parameters
+ # for the unconsumed ones so a correct OpenAPI schema can be generated
+ params_not_consumed_by_handler = set(self.path_parameters) - handler_fields.keys()
+ handler_fields.update(
+ {
+ param_name: FieldDefinition.from_kwarg(self.path_parameters[param_name].type, name=param_name)
+ for param_name in params_not_consumed_by_handler
+ }
+ )
+
self.create_parameters_for_field_definitions(handler_fields)
return self.parameters.list()
| {"golden_diff": "diff --git a/litestar/_openapi/parameters.py b/litestar/_openapi/parameters.py\n--- a/litestar/_openapi/parameters.py\n+++ b/litestar/_openapi/parameters.py\n@@ -96,7 +96,7 @@\n self.parameters = ParameterCollection(route_handler)\n self.dependency_providers = route_handler.resolve_dependencies()\n self.layered_parameters = route_handler.resolve_layered_parameters()\n- self.path_parameters_names = {p.name for p in path_parameters}\n+ self.path_parameters: dict[str, PathParameterDefinition] = {p.name: p for p in path_parameters}\n \n def create_parameter(self, field_definition: FieldDefinition, parameter_name: str) -> Parameter:\n \"\"\"Create an OpenAPI Parameter instance for a field definition.\n@@ -111,7 +111,7 @@\n field_definition.kwarg_definition if isinstance(field_definition.kwarg_definition, ParameterKwarg) else None\n )\n \n- if parameter_name in self.path_parameters_names:\n+ if parameter_name in self.path_parameters:\n param_in = ParamType.PATH\n is_required = True\n result = self.schema_creator.for_field_definition(field_definition)\n@@ -215,6 +215,17 @@\n def create_parameters_for_handler(self) -> list[Parameter]:\n \"\"\"Create a list of path/query/header Parameter models for the given PathHandler.\"\"\"\n handler_fields = self.route_handler.parsed_fn_signature.parameters\n+ # not all path parameters have to be consumed by the handler. Because even not\n+ # consumed path parameters must still be specified, we create stub parameters\n+ # for the unconsumed ones so a correct OpenAPI schema can be generated\n+ params_not_consumed_by_handler = set(self.path_parameters) - handler_fields.keys()\n+ handler_fields.update(\n+ {\n+ param_name: FieldDefinition.from_kwarg(self.path_parameters[param_name].type, name=param_name)\n+ for param_name in params_not_consumed_by_handler\n+ }\n+ )\n+\n self.create_parameters_for_field_definitions(handler_fields)\n return self.parameters.list()\n", "issue": "Bug: Path parameters missing from OpenAPI schema when not included in handler signature\n### Description\r\n\r\nWhen defining a path parameter, but not using it in the handler signature, the path parameter is not documented in the OpenAPI schema. This is likely due to how we handle their extraction, which is based on the handler. \r\n\r\nThis is an issue though because, even if the parameter value is not used in the handler itself, it is still required to specify and should therefore be documented. 
\r\n\r\n### URL to code causing the issue\r\n\r\n_No response_\r\n\r\n### MCVE\r\n\r\n```python\r\n@get(\"/{param:str}\")\r\nasync def handler() -> None:\r\n ...\r\n\r\napp = Litestar([handler])\r\nassert app.openapi_schema.paths[\"/{param}\"].get.parameters\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\n_No response_\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### Litestar Version\r\n\r\n2.7.1 \r\n\r\n### Platform\r\n\r\n- [X] Linux\r\n- [ ] Mac\r\n- [ ] Windows\r\n- [ ] Other (Please specify in the description above)\r\n\r\n<!-- POLAR PLEDGE BADGE START -->\r\n---\r\n> [!NOTE] \r\n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \r\n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\r\n>\r\n> Check out all issues funded or available for funding [on our Polar.sh dashboard](https://polar.sh/litestar-org)\r\n> * If you would like to see an issue prioritized, make a pledge towards it!\r\n> * We receive the pledge once the issue is completed & verified\r\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\r\n\r\n<a href=\"https://polar.sh/litestar-org/litestar/issues/3290\">\r\n<picture>\r\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/3290/pledge.svg?darkmode=1\">\r\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/3290/pledge.svg\">\r\n</picture>\r\n</a>\r\n<!-- POLAR PLEDGE BADGE END -->\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nfrom litestar._openapi.schema_generation import SchemaCreator\nfrom litestar._openapi.schema_generation.utils import get_formatted_examples\nfrom litestar.constants import RESERVED_KWARGS\nfrom litestar.enums import ParamType\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.openapi.spec.parameter import Parameter\nfrom litestar.openapi.spec.schema import Schema\nfrom litestar.params import DependencyKwarg, ParameterKwarg\nfrom litestar.types import Empty\nfrom litestar.typing import FieldDefinition\n\nif TYPE_CHECKING:\n from litestar._openapi.datastructures import OpenAPIContext\n from litestar.handlers.base import BaseRouteHandler\n from litestar.openapi.spec import Reference\n from litestar.types.internal_types import PathParameterDefinition\n\n__all__ = (\"create_parameters_for_handler\",)\n\n\nclass ParameterCollection:\n \"\"\"Facilitates conditional deduplication of parameters.\n\n If multiple parameters with the same name are produced for a handler, the condition is ignored if the two\n ``Parameter`` instances are the same (the first is retained and any duplicates are ignored). 
If the ``Parameter``\n instances are not the same, an exception is raised.\n \"\"\"\n\n def __init__(self, route_handler: BaseRouteHandler) -> None:\n \"\"\"Initialize ``ParameterCollection``.\n\n Args:\n route_handler: Associated route handler\n \"\"\"\n self.route_handler = route_handler\n self._parameters: dict[tuple[str, str], Parameter] = {}\n\n def add(self, parameter: Parameter) -> None:\n \"\"\"Add a ``Parameter`` to the collection.\n\n If an existing parameter with the same name and type already exists, the\n parameter is ignored.\n\n If an existing parameter with the same name but different type exists, raises\n ``ImproperlyConfiguredException``.\n \"\"\"\n\n if (parameter.name, parameter.param_in) not in self._parameters:\n # because we are defining routes as unique per path, we have to handle here a situation when there is an optional\n # path parameter. e.g. get(path=[\"/\", \"/{param:str}\"]). When parsing the parameter for path, the route handler\n # would still have a kwarg called param:\n # def handler(param: str | None) -> ...\n if parameter.param_in != ParamType.QUERY or all(\n f\"{{{parameter.name}:\" not in path for path in self.route_handler.paths\n ):\n self._parameters[(parameter.name, parameter.param_in)] = parameter\n return\n\n pre_existing = self._parameters[(parameter.name, parameter.param_in)]\n if parameter == pre_existing:\n return\n\n raise ImproperlyConfiguredException(\n f\"OpenAPI schema generation for handler `{self.route_handler}` detected multiple parameters named \"\n f\"'{parameter.name}' with different types.\"\n )\n\n def list(self) -> list[Parameter]:\n \"\"\"Return a list of all ``Parameter``'s in the collection.\"\"\"\n return list(self._parameters.values())\n\n\nclass ParameterFactory:\n \"\"\"Factory for creating OpenAPI Parameters for a given route handler.\"\"\"\n\n def __init__(\n self,\n context: OpenAPIContext,\n route_handler: BaseRouteHandler,\n path_parameters: tuple[PathParameterDefinition, ...],\n ) -> None:\n \"\"\"Initialize ParameterFactory.\n\n Args:\n context: The OpenAPI context.\n route_handler: The route handler.\n path_parameters: The path parameters for the route.\n \"\"\"\n self.context = context\n self.schema_creator = SchemaCreator.from_openapi_context(self.context, prefer_alias=True)\n self.route_handler = route_handler\n self.parameters = ParameterCollection(route_handler)\n self.dependency_providers = route_handler.resolve_dependencies()\n self.layered_parameters = route_handler.resolve_layered_parameters()\n self.path_parameters_names = {p.name for p in path_parameters}\n\n def create_parameter(self, field_definition: FieldDefinition, parameter_name: str) -> Parameter:\n \"\"\"Create an OpenAPI Parameter instance for a field definition.\n\n Args:\n field_definition: The field definition.\n parameter_name: The name of the parameter.\n \"\"\"\n\n result: Schema | Reference | None = None\n kwarg_definition = (\n field_definition.kwarg_definition if isinstance(field_definition.kwarg_definition, ParameterKwarg) else None\n )\n\n if parameter_name in self.path_parameters_names:\n param_in = ParamType.PATH\n is_required = True\n result = self.schema_creator.for_field_definition(field_definition)\n elif kwarg_definition and kwarg_definition.header:\n parameter_name = kwarg_definition.header\n param_in = ParamType.HEADER\n is_required = field_definition.is_required\n elif kwarg_definition and kwarg_definition.cookie:\n parameter_name = kwarg_definition.cookie\n param_in = ParamType.COOKIE\n is_required = 
field_definition.is_required\n else:\n is_required = field_definition.is_required\n param_in = ParamType.QUERY\n parameter_name = kwarg_definition.query if kwarg_definition and kwarg_definition.query else parameter_name\n\n if not result:\n result = self.schema_creator.for_field_definition(field_definition)\n\n schema = result if isinstance(result, Schema) else self.context.schema_registry.from_reference(result).schema\n\n examples_list = kwarg_definition.examples or [] if kwarg_definition else []\n examples = get_formatted_examples(field_definition, examples_list)\n\n return Parameter(\n description=schema.description,\n name=parameter_name,\n param_in=param_in,\n required=is_required,\n schema=result,\n examples=examples or None,\n )\n\n def get_layered_parameter(self, field_name: str, field_definition: FieldDefinition) -> Parameter:\n \"\"\"Create a parameter for a field definition that has a KwargDefinition defined on the layers.\n\n Args:\n field_name: The name of the field.\n field_definition: The field definition.\n \"\"\"\n layer_field = self.layered_parameters[field_name]\n\n field = field_definition if field_definition.is_parameter_field else layer_field\n default = layer_field.default if field_definition.has_default else field_definition.default\n annotation = field_definition.annotation if field_definition is not Empty else layer_field.annotation\n\n parameter_name = field_name\n if isinstance(field.kwarg_definition, ParameterKwarg):\n parameter_name = (\n field.kwarg_definition.query\n or field.kwarg_definition.header\n or field.kwarg_definition.cookie\n or field_name\n )\n\n field_definition = FieldDefinition.from_kwarg(\n inner_types=field.inner_types,\n default=default,\n extra=field.extra,\n annotation=annotation,\n kwarg_definition=field.kwarg_definition,\n name=field_name,\n )\n return self.create_parameter(field_definition=field_definition, parameter_name=parameter_name)\n\n def create_parameters_for_field_definitions(self, fields: dict[str, FieldDefinition]) -> None:\n \"\"\"Add Parameter models to the handler's collection for the given field definitions.\n\n Args:\n fields: The field definitions.\n \"\"\"\n unique_handler_fields = (\n (k, v) for k, v in fields.items() if k not in RESERVED_KWARGS and k not in self.layered_parameters\n )\n unique_layered_fields = (\n (k, v) for k, v in self.layered_parameters.items() if k not in RESERVED_KWARGS and k not in fields\n )\n intersection_fields = (\n (k, v) for k, v in fields.items() if k not in RESERVED_KWARGS and k in self.layered_parameters\n )\n\n for field_name, field_definition in unique_handler_fields:\n if (\n isinstance(field_definition.kwarg_definition, DependencyKwarg)\n and field_name not in self.dependency_providers\n ):\n # never document explicit dependencies\n continue\n\n if provider := self.dependency_providers.get(field_name):\n self.create_parameters_for_field_definitions(fields=provider.parsed_fn_signature.parameters)\n else:\n self.parameters.add(self.create_parameter(field_definition=field_definition, parameter_name=field_name))\n\n for field_name, field_definition in unique_layered_fields:\n self.parameters.add(self.create_parameter(field_definition=field_definition, parameter_name=field_name))\n\n for field_name, field_definition in intersection_fields:\n self.parameters.add(self.get_layered_parameter(field_name=field_name, field_definition=field_definition))\n\n def create_parameters_for_handler(self) -> list[Parameter]:\n \"\"\"Create a list of path/query/header Parameter models for the given 
PathHandler.\"\"\"\n handler_fields = self.route_handler.parsed_fn_signature.parameters\n self.create_parameters_for_field_definitions(handler_fields)\n return self.parameters.list()\n\n\ndef create_parameters_for_handler(\n context: OpenAPIContext,\n route_handler: BaseRouteHandler,\n path_parameters: tuple[PathParameterDefinition, ...],\n) -> list[Parameter]:\n \"\"\"Create a list of path/query/header Parameter models for the given PathHandler.\"\"\"\n factory = ParameterFactory(\n context=context,\n route_handler=route_handler,\n path_parameters=path_parameters,\n )\n return factory.create_parameters_for_handler()\n", "path": "litestar/_openapi/parameters.py"}]} | 3,589 | 457 |
gh_patches_debug_1640 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1430 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
measure.label is documented under morphology.label
In the [measure API reference](http://scikit-image.org/docs/stable/api/skimage.measure.html) label is not documented, but it is [documented under the morphology module](http://scikit-image.org/docs/stable/api/skimage.morphology.html#label) (which is deprecated).
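For orientation, a minimal usage sketch (my addition, assuming the public `skimage.measure.label` API of that release; the sample array is mine) of the function users reach through `skimage.measure`, which is why it should be documented there:
```python
import numpy as np
from skimage import measure

# Label connected regions of a small binary image via the measure module.
image = np.array([[0, 1, 1],
                  [0, 0, 1],
                  [1, 0, 0]])
labels = measure.label(image)
```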
</issue>
<code>
[start of skimage/measure/__init__.py]
1 from ._find_contours import find_contours
2 from ._marching_cubes import (marching_cubes, mesh_surface_area,
3 correct_mesh_orientation)
4 from ._regionprops import regionprops, perimeter
5 from ._structural_similarity import structural_similarity
6 from ._polygon import approximate_polygon, subdivide_polygon
7 from ._pnpoly import points_in_poly, grid_points_in_poly
8 from ._moments import moments, moments_central, moments_normalized, moments_hu
9 from .profile import profile_line
10 from .fit import LineModel, CircleModel, EllipseModel, ransac
11 from .block import block_reduce
12 from ._ccomp import label
13
14
15 __all__ = ['find_contours',
16 'regionprops',
17 'perimeter',
18 'structural_similarity',
19 'approximate_polygon',
20 'subdivide_polygon',
21 'LineModel',
22 'CircleModel',
23 'EllipseModel',
24 'ransac',
25 'block_reduce',
26 'moments',
27 'moments_central',
28 'moments_normalized',
29 'moments_hu',
30 'marching_cubes',
31 'mesh_surface_area',
32 'correct_mesh_orientation',
33 'profile_line',
34 'label',
35 'points_in_poly',
36 'grid_points_in_poly']
37
[end of skimage/measure/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/measure/__init__.py b/skimage/measure/__init__.py
--- a/skimage/measure/__init__.py
+++ b/skimage/measure/__init__.py
@@ -9,7 +9,7 @@
from .profile import profile_line
from .fit import LineModel, CircleModel, EllipseModel, ransac
from .block import block_reduce
-from ._ccomp import label
+from ._label import label
__all__ = ['find_contours',
| {"golden_diff": "diff --git a/skimage/measure/__init__.py b/skimage/measure/__init__.py\n--- a/skimage/measure/__init__.py\n+++ b/skimage/measure/__init__.py\n@@ -9,7 +9,7 @@\n from .profile import profile_line\n from .fit import LineModel, CircleModel, EllipseModel, ransac\n from .block import block_reduce\n-from ._ccomp import label\n+from ._label import label\n \n \n __all__ = ['find_contours',\n", "issue": "measure.label is documented under morphology.label\nIn the [measure API reference](http://scikit-image.org/docs/stable/api/skimage.measure.html) label is not documented, but it is [documented under morphology module](http://scikit-image.org/docs/stable/api/skimage.morphology.html#label) (which is depreciated).\n\n", "before_files": [{"content": "from ._find_contours import find_contours\nfrom ._marching_cubes import (marching_cubes, mesh_surface_area,\n correct_mesh_orientation)\nfrom ._regionprops import regionprops, perimeter\nfrom ._structural_similarity import structural_similarity\nfrom ._polygon import approximate_polygon, subdivide_polygon\nfrom ._pnpoly import points_in_poly, grid_points_in_poly\nfrom ._moments import moments, moments_central, moments_normalized, moments_hu\nfrom .profile import profile_line\nfrom .fit import LineModel, CircleModel, EllipseModel, ransac\nfrom .block import block_reduce\nfrom ._ccomp import label\n\n\n__all__ = ['find_contours',\n 'regionprops',\n 'perimeter',\n 'structural_similarity',\n 'approximate_polygon',\n 'subdivide_polygon',\n 'LineModel',\n 'CircleModel',\n 'EllipseModel',\n 'ransac',\n 'block_reduce',\n 'moments',\n 'moments_central',\n 'moments_normalized',\n 'moments_hu',\n 'marching_cubes',\n 'mesh_surface_area',\n 'correct_mesh_orientation',\n 'profile_line',\n 'label',\n 'points_in_poly',\n 'grid_points_in_poly']\n", "path": "skimage/measure/__init__.py"}]} | 938 | 114 |
gh_patches_debug_38564 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-1555 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
service domain name will be changed after relaunch due to PR 1543
ElasticDL workers use service domain names to connect to PS pods.
Service domain names should not change after a PS pod is relaunched.
https://github.com/sql-machine-learning/elasticdl/pull/1543 causes a regression that will change the service domain name after PS relaunch.
```
[2019-12-05 11:31:19,096] [INFO] [k8s_instance_manager.py:139:_update_addr] addr list before update is ['elasticdl-test-embedding-edl-ps-0.kubemaker.svc:2222', 'elasticdl-test-embedding-edl-ps-1.kubemaker.svc:2222']
[2019-12-05 11:31:19,096] [INFO] [k8s_instance_manager.py:143:_update_addr] addr list after update is ['elasticdl-test-embedding-edl-ps-0.kubemaker.svc:2222', 'elasticdl-test-embedding-edl-ps-2.kubemaker.svc:2222']
```
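To make the regression concrete, here is a small illustrative sketch (the helper below is hypothetical, not the real ElasticDL API; only the address pattern is taken from the log above): relaunching a PS pod under a brand-new ID changes the DNS name the workers already hold, while reusing the failed pod's ID keeps it stable.
```python
# Hypothetical helper; the address format follows the log lines above.
def ps_service_address(job_name: str, ps_id: int, namespace: str = "kubemaker") -> str:
    return "{}-edl-ps-{}.{}.svc:2222".format(job_name, ps_id, namespace)


addr_held_by_workers = ps_service_address("elasticdl-test-embedding", 1)
# Regression: the relaunched pod gets a fresh ID, so its service name changes.
addr_after_regression = ps_service_address("elasticdl-test-embedding", 2)
# Expected: reusing the original ID keeps the name the workers connect to.
addr_after_expected = ps_service_address("elasticdl-test-embedding", 1)

assert addr_held_by_workers != addr_after_regression
assert addr_held_by_workers == addr_after_expected
```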
</issue>
<code>
[start of elasticdl/python/master/k8s_instance_manager.py]
1 import copy
2 import itertools
3 import threading
4 from collections import Counter
5
6 from elasticdl.python.common import k8s_client as k8s
7 from elasticdl.python.common.log_utils import default_logger as logger
8
9 _SERVICE_ADDR_SEP = ","
10
11
12 class InstanceManager(object):
13 def __init__(
14 self,
15 task_d,
16 num_workers=1,
17 worker_command=None,
18 worker_args=None,
19 worker_resource_request="cpu=1,memory=4096Mi",
20 worker_resource_limit="cpu=1,memory=4096Mi",
21 worker_pod_priority=None,
22 num_ps=0,
23 ps_command=None,
24 ps_args=None,
25 ps_resource_request="cpu=1,memory=4096Mi",
26 ps_resource_limit="cpu=1,memory=4096Mi",
27 ps_pod_priority=None,
28 volume=None,
29 image_pull_policy=None,
30 restart_policy="Never",
31 envs=None,
32 **kwargs
33 ):
34 self._num_workers = num_workers
35 self._worker_command = worker_command
36 self._worker_args = worker_args
37 self._worker_resource_request = worker_resource_request
38 self._worker_resource_limit = worker_resource_limit
39 self._worker_pod_priority = worker_pod_priority
40
41 self._num_ps = num_ps
42 self._ps_command = ps_command
43 self._ps_args = ps_args
44 self._ps_resource_request = ps_resource_request
45 self._ps_resource_limit = ps_resource_limit
46 self._ps_pod_priority = ps_pod_priority
47
48 self._restart_policy = restart_policy
49 self._volume = volume
50 self._image_pull_policy = image_pull_policy
51 self._envs = envs
52 self._task_d = task_d
53 self._next_worker_id = itertools.count().__next__
54 self._next_ps_id = itertools.count().__next__
55
56 # Protects followed variables, which are accessed from event_cb.
57 self._lock = threading.Lock()
58 # worker id to (pod name, phase) mapping
59 # phase: None/Pending/Running/Succeeded/Failed/Unknown
60 # None: worker was just launched, haven't received event yet.
61 # Pending: worker pod not started yet
62 # Running: worker pod is running
63 # Succeeded: worker pod finishes all tasks and terminates with
64 # no issue.
65 # Failed: worker pod is killed for some reason
66 # Unknown: unknown
67 self._worker_pods_phase = {}
68 # pod name to worker id mapping
69 self._worker_pod_name_to_id = {}
70
71 self._relaunch_deleted_live_worker = True
72
73 self._ps_pods_phase = {}
74 self._ps_pod_name_to_id = {}
75 self._relaunch_deleted_live_ps = True
76
77 self._k8s_client = k8s.Client(event_callback=self._event_cb, **kwargs)
78 self._ps_addrs = self._get_addrs(
79 self._num_ps, self._k8s_client.get_ps_service_address
80 )
81 # TODO: Select a worker address to be used for broadcasting model
82 # parameters under allreduce-strategy.
83 self._worker_addrs = self._get_addrs(
84 self._num_workers, self._k8s_client.get_worker_service_address
85 )
86
87 def _start_worker(self, worker_id):
88 logger.info("Starting worker: %d" % worker_id)
89 with self._lock:
90 pod = self._k8s_client.create_worker(
91 worker_id=worker_id,
92 resource_requests=self._worker_resource_request,
93 resource_limits=self._worker_resource_limit,
94 pod_priority=self._worker_pod_priority,
95 volume=self._volume,
96 image_pull_policy=self._image_pull_policy,
97 command=self._worker_command,
98 args=self._worker_args
99 + ["--worker_id", str(worker_id)]
100 + ["--ps_addrs", self._ps_addrs],
101 restart_policy=self._restart_policy,
102 ps_addrs=self._ps_addrs,
103 envs=copy.deepcopy(self._envs),
104 )
105 name = pod.metadata.name
106 self._worker_pod_name_to_id[name] = worker_id
107 self._worker_pods_phase[worker_id] = (name, None)
108 self._k8s_client.create_worker_service(worker_id)
109
110 def _start_ps(self, ps_id):
111 logger.info("Starting PS: %d" % ps_id)
112 with self._lock:
113 pod = self._k8s_client.create_ps(
114 ps_id=ps_id,
115 resource_requests=self._ps_resource_request,
116 resource_limits=self._ps_resource_limit,
117 pod_priority=self._ps_pod_priority,
118 volume=self._volume,
119 image_pull_policy=self._image_pull_policy,
120 command=self._ps_command,
121 args=self._ps_args + ["--ps_id", str(ps_id)],
122 restart_policy=self._restart_policy,
123 envs=copy.deepcopy(self._envs),
124 )
125 name = pod.metadata.name
126 self._ps_pod_name_to_id[name] = ps_id
127 self._ps_pods_phase[ps_id] = (name, None)
128 self._k8s_client.create_ps_service(ps_id)
129
130 def _get_addrs(self, num_addrs, addr_get_fn):
131 addrs = []
132 for addr_id in range(num_addrs):
133 addrs.append(addr_get_fn(addr_id))
134 return _SERVICE_ADDR_SEP.join(addrs)
135
136 @staticmethod
137 def _update_addr(old_addr, new_addr, addrs, addr_get_fn):
138 addrs_list = addrs.split(_SERVICE_ADDR_SEP)
139 addrs_list[addrs_list.index(addr_get_fn(old_addr))] = addr_get_fn(
140 new_addr
141 )
142 return _SERVICE_ADDR_SEP.join(addrs_list)
143
144 def update_status(self, status):
145 master_name = self._k8s_client.get_master_pod_name()
146 self._k8s_client.patch_labels_to_pod(
147 master_name, labels_dict={"status": status}
148 )
149
150 def start_workers(self):
151 for _ in range(self._num_workers):
152 self._start_worker(self._next_worker_id())
153
154 def start_parameter_servers(self):
155 for _ in range(self._num_ps):
156 self._start_ps(self._next_ps_id())
157
158 def _remove_worker(self, worker_id):
159 logger.info("Removing worker: %d", worker_id)
160 with self._lock:
161 if worker_id not in self._worker_pods_phase:
162 logger.error("Unknown worker id: %s" % worker_id)
163 return
164
165 # TODO: change _k8s_client to accept pod name instead of worker id.
166 self._k8s_client.delete_worker(worker_id)
167
168 def _remove_ps(self, ps_id):
169 logger.info("Removing PS: %d", ps_id)
170 with self._lock:
171 if ps_id not in self._ps_pods_phase:
172 logger.error("Unknown PS id: %s" % ps_id)
173 return
174
175 self._k8s_client.delete_ps(ps_id)
176
177 def stop_relaunch_and_remove_workers(self):
178 with self._lock:
179 self._relaunch_deleted_live_worker = False
180 for worker_id in self._worker_pods_phase:
181 self._k8s_client.delete_worker(worker_id)
182
183 def stop_relaunch_and_remove_all_ps(self):
184 with self._lock:
185 self._relaunch_deleted_live_ps = False
186 for ps_id in self._ps_pods_phase:
187 self._k8s_client.delete_ps(ps_id)
188
189 def get_worker_counter(self):
190 with self._lock:
191 return Counter([v for _, v in self._worker_pods_phase.values()])
192
193 def get_ps_counter(self):
194 with self._lock:
195 return Counter([v for _, v in self._ps_pods_phase.values()])
196
197 def _event_cb(self, event):
198 evt_obj = event.get("object")
199 evt_type = event.get("type")
200 if not evt_obj or not evt_type:
201 logger.error("Event doesn't have object or type: %s" % event)
202 return
203
204 if evt_obj.kind != "Pod":
205 # We only care about pod related events
206 return
207
208 pod_name = evt_obj.metadata.name
209 phase = evt_obj.status.phase
210 logger.info(
211 "Got event %s, phase %s for pod: %s" % (evt_type, phase, pod_name)
212 )
213 if pod_name == self._k8s_client.get_master_pod_name():
214 # No need to care about master pod
215 return
216
217 relaunch_worker = False
218 relaunch_ps = False
219 worker_id = None
220 ps_id = None
221 with self._lock:
222 if pod_name in self._worker_pod_name_to_id:
223 worker_id = self._worker_pod_name_to_id.get(pod_name)
224 self._worker_pods_phase[worker_id] = (pod_name, phase)
225 if evt_type == "DELETED":
226 del self._worker_pods_phase[worker_id]
227 del self._worker_pod_name_to_id[pod_name]
228 self._task_d.recover_tasks(worker_id)
229
230 # If a deleted pod was not "Succeeded", relaunch a worker.
231 relaunch_worker = (
232 self._relaunch_deleted_live_worker
233 and phase != "Succeeded"
234 )
235
236 elif pod_name in self._ps_pod_name_to_id:
237 ps_id = self._ps_pod_name_to_id.get(pod_name)
238 self._ps_pods_phase[ps_id] = (pod_name, phase)
239 if evt_type == "DELETED":
240 del self._ps_pods_phase[ps_id]
241 del self._ps_pod_name_to_id[pod_name]
242 relaunch_ps = self._relaunch_deleted_live_ps
243 else:
244 logger.error("Unknown pod name: %s" % pod_name)
245 return
246
247 if relaunch_worker and worker_id:
248 logger.info("Relaunching worker.")
249 new_worker_id = self._next_worker_id()
250 self._start_worker(new_worker_id)
251 self._update_addr(
252 worker_id,
253 new_worker_id,
254 self._worker_addrs,
255 addr_get_fn=self._k8s_client.get_worker_service_address,
256 )
257 elif relaunch_ps and ps_id:
258 logger.info("Relaunching ps.")
259 new_ps_id = self._next_ps_id()
260 self._start_ps(new_ps_id)
261 self._update_addr(
262 ps_id,
263 new_ps_id,
264 self._ps_addrs,
265 addr_get_fn=self._k8s_client.get_ps_service_address,
266 )
267
268 @property
269 def ps_addrs(self):
270 return self._ps_addrs
271
[end of elasticdl/python/master/k8s_instance_manager.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticdl/python/master/k8s_instance_manager.py b/elasticdl/python/master/k8s_instance_manager.py
--- a/elasticdl/python/master/k8s_instance_manager.py
+++ b/elasticdl/python/master/k8s_instance_manager.py
@@ -51,7 +51,6 @@
self._envs = envs
self._task_d = task_d
self._next_worker_id = itertools.count().__next__
- self._next_ps_id = itertools.count().__next__
# Protects followed variables, which are accessed from event_cb.
self._lock = threading.Lock()
@@ -152,8 +151,8 @@
self._start_worker(self._next_worker_id())
def start_parameter_servers(self):
- for _ in range(self._num_ps):
- self._start_ps(self._next_ps_id())
+ for i in range(self._num_ps):
+ self._start_ps(i)
def _remove_worker(self, worker_id):
logger.info("Removing worker: %d", worker_id)
@@ -216,8 +215,8 @@
relaunch_worker = False
relaunch_ps = False
- worker_id = None
- ps_id = None
+ worker_id = -1
+ ps_id = -1
with self._lock:
if pod_name in self._worker_pod_name_to_id:
worker_id = self._worker_pod_name_to_id.get(pod_name)
@@ -244,7 +243,7 @@
logger.error("Unknown pod name: %s" % pod_name)
return
- if relaunch_worker and worker_id:
+ if relaunch_worker and worker_id >= 0:
logger.info("Relaunching worker.")
new_worker_id = self._next_worker_id()
self._start_worker(new_worker_id)
@@ -254,16 +253,12 @@
self._worker_addrs,
addr_get_fn=self._k8s_client.get_worker_service_address,
)
- elif relaunch_ps and ps_id:
+ elif relaunch_ps:
logger.info("Relaunching ps.")
- new_ps_id = self._next_ps_id()
- self._start_ps(new_ps_id)
- self._update_addr(
- ps_id,
- new_ps_id,
- self._ps_addrs,
- addr_get_fn=self._k8s_client.get_ps_service_address,
- )
+ # Note: the ID and service address for relaunched parameter
+ # server are intentionally left unchanged to support fault
+ # tolerance.
+ self._start_ps(ps_id)
@property
def ps_addrs(self):
| {"golden_diff": "diff --git a/elasticdl/python/master/k8s_instance_manager.py b/elasticdl/python/master/k8s_instance_manager.py\n--- a/elasticdl/python/master/k8s_instance_manager.py\n+++ b/elasticdl/python/master/k8s_instance_manager.py\n@@ -51,7 +51,6 @@\n self._envs = envs\n self._task_d = task_d\n self._next_worker_id = itertools.count().__next__\n- self._next_ps_id = itertools.count().__next__\n \n # Protects followed variables, which are accessed from event_cb.\n self._lock = threading.Lock()\n@@ -152,8 +151,8 @@\n self._start_worker(self._next_worker_id())\n \n def start_parameter_servers(self):\n- for _ in range(self._num_ps):\n- self._start_ps(self._next_ps_id())\n+ for i in range(self._num_ps):\n+ self._start_ps(i)\n \n def _remove_worker(self, worker_id):\n logger.info(\"Removing worker: %d\", worker_id)\n@@ -216,8 +215,8 @@\n \n relaunch_worker = False\n relaunch_ps = False\n- worker_id = None\n- ps_id = None\n+ worker_id = -1\n+ ps_id = -1\n with self._lock:\n if pod_name in self._worker_pod_name_to_id:\n worker_id = self._worker_pod_name_to_id.get(pod_name)\n@@ -244,7 +243,7 @@\n logger.error(\"Unknown pod name: %s\" % pod_name)\n return\n \n- if relaunch_worker and worker_id:\n+ if relaunch_worker and worker_id >= 0:\n logger.info(\"Relaunching worker.\")\n new_worker_id = self._next_worker_id()\n self._start_worker(new_worker_id)\n@@ -254,16 +253,12 @@\n self._worker_addrs,\n addr_get_fn=self._k8s_client.get_worker_service_address,\n )\n- elif relaunch_ps and ps_id:\n+ elif relaunch_ps:\n logger.info(\"Relaunching ps.\")\n- new_ps_id = self._next_ps_id()\n- self._start_ps(new_ps_id)\n- self._update_addr(\n- ps_id,\n- new_ps_id,\n- self._ps_addrs,\n- addr_get_fn=self._k8s_client.get_ps_service_address,\n- )\n+ # Note: the ID and service address for relaunched parameter\n+ # server are intentionally left unchanged to support fault\n+ # tolerance.\n+ self._start_ps(ps_id)\n \n @property\n def ps_addrs(self):\n", "issue": "service domain name will be changed after relaunch due to PR 1543\nElasticDL workers use service domain names to connect to PS pods. 
\r\nService domain names should not be changed after PS pods relaunch.\r\nhttps://github.com/sql-machine-learning/elasticdl/pull/1543 causes a regression that will change the service domain name after PS relaunch.\r\n\r\n```\r\n[2019-12-05 11:31:19,096] [INFO] [k8s_instance_manager.py:139:_update_addr] addr list before update is ['elasticdl-test-embedding-edl-ps-0.kubemaker.svc:2222', 'elasticdl-test-embedding-edl-ps-1.kubemaker.svc:2222']\r\n[2019-12-05 11:31:19,096] [INFO] [k8s_instance_manager.py:143:_update_addr] addr list after update is ['elasticdl-test-embedding-edl-ps-0.kubemaker.svc:2222', 'elasticdl-test-embedding-edl-ps-2.kubemaker.svc:2222']\r\n```\n", "before_files": [{"content": "import copy\nimport itertools\nimport threading\nfrom collections import Counter\n\nfrom elasticdl.python.common import k8s_client as k8s\nfrom elasticdl.python.common.log_utils import default_logger as logger\n\n_SERVICE_ADDR_SEP = \",\"\n\n\nclass InstanceManager(object):\n def __init__(\n self,\n task_d,\n num_workers=1,\n worker_command=None,\n worker_args=None,\n worker_resource_request=\"cpu=1,memory=4096Mi\",\n worker_resource_limit=\"cpu=1,memory=4096Mi\",\n worker_pod_priority=None,\n num_ps=0,\n ps_command=None,\n ps_args=None,\n ps_resource_request=\"cpu=1,memory=4096Mi\",\n ps_resource_limit=\"cpu=1,memory=4096Mi\",\n ps_pod_priority=None,\n volume=None,\n image_pull_policy=None,\n restart_policy=\"Never\",\n envs=None,\n **kwargs\n ):\n self._num_workers = num_workers\n self._worker_command = worker_command\n self._worker_args = worker_args\n self._worker_resource_request = worker_resource_request\n self._worker_resource_limit = worker_resource_limit\n self._worker_pod_priority = worker_pod_priority\n\n self._num_ps = num_ps\n self._ps_command = ps_command\n self._ps_args = ps_args\n self._ps_resource_request = ps_resource_request\n self._ps_resource_limit = ps_resource_limit\n self._ps_pod_priority = ps_pod_priority\n\n self._restart_policy = restart_policy\n self._volume = volume\n self._image_pull_policy = image_pull_policy\n self._envs = envs\n self._task_d = task_d\n self._next_worker_id = itertools.count().__next__\n self._next_ps_id = itertools.count().__next__\n\n # Protects followed variables, which are accessed from event_cb.\n self._lock = threading.Lock()\n # worker id to (pod name, phase) mapping\n # phase: None/Pending/Running/Succeeded/Failed/Unknown\n # None: worker was just launched, haven't received event yet.\n # Pending: worker pod not started yet\n # Running: worker pod is running\n # Succeeded: worker pod finishes all tasks and terminates with\n # no issue.\n # Failed: worker pod is killed for some reason\n # Unknown: unknown\n self._worker_pods_phase = {}\n # pod name to worker id mapping\n self._worker_pod_name_to_id = {}\n\n self._relaunch_deleted_live_worker = True\n\n self._ps_pods_phase = {}\n self._ps_pod_name_to_id = {}\n self._relaunch_deleted_live_ps = True\n\n self._k8s_client = k8s.Client(event_callback=self._event_cb, **kwargs)\n self._ps_addrs = self._get_addrs(\n self._num_ps, self._k8s_client.get_ps_service_address\n )\n # TODO: Select a worker address to be used for broadcasting model\n # parameters under allreduce-strategy.\n self._worker_addrs = self._get_addrs(\n self._num_workers, self._k8s_client.get_worker_service_address\n )\n\n def _start_worker(self, worker_id):\n logger.info(\"Starting worker: %d\" % worker_id)\n with self._lock:\n pod = self._k8s_client.create_worker(\n worker_id=worker_id,\n 
resource_requests=self._worker_resource_request,\n resource_limits=self._worker_resource_limit,\n pod_priority=self._worker_pod_priority,\n volume=self._volume,\n image_pull_policy=self._image_pull_policy,\n command=self._worker_command,\n args=self._worker_args\n + [\"--worker_id\", str(worker_id)]\n + [\"--ps_addrs\", self._ps_addrs],\n restart_policy=self._restart_policy,\n ps_addrs=self._ps_addrs,\n envs=copy.deepcopy(self._envs),\n )\n name = pod.metadata.name\n self._worker_pod_name_to_id[name] = worker_id\n self._worker_pods_phase[worker_id] = (name, None)\n self._k8s_client.create_worker_service(worker_id)\n\n def _start_ps(self, ps_id):\n logger.info(\"Starting PS: %d\" % ps_id)\n with self._lock:\n pod = self._k8s_client.create_ps(\n ps_id=ps_id,\n resource_requests=self._ps_resource_request,\n resource_limits=self._ps_resource_limit,\n pod_priority=self._ps_pod_priority,\n volume=self._volume,\n image_pull_policy=self._image_pull_policy,\n command=self._ps_command,\n args=self._ps_args + [\"--ps_id\", str(ps_id)],\n restart_policy=self._restart_policy,\n envs=copy.deepcopy(self._envs),\n )\n name = pod.metadata.name\n self._ps_pod_name_to_id[name] = ps_id\n self._ps_pods_phase[ps_id] = (name, None)\n self._k8s_client.create_ps_service(ps_id)\n\n def _get_addrs(self, num_addrs, addr_get_fn):\n addrs = []\n for addr_id in range(num_addrs):\n addrs.append(addr_get_fn(addr_id))\n return _SERVICE_ADDR_SEP.join(addrs)\n\n @staticmethod\n def _update_addr(old_addr, new_addr, addrs, addr_get_fn):\n addrs_list = addrs.split(_SERVICE_ADDR_SEP)\n addrs_list[addrs_list.index(addr_get_fn(old_addr))] = addr_get_fn(\n new_addr\n )\n return _SERVICE_ADDR_SEP.join(addrs_list)\n\n def update_status(self, status):\n master_name = self._k8s_client.get_master_pod_name()\n self._k8s_client.patch_labels_to_pod(\n master_name, labels_dict={\"status\": status}\n )\n\n def start_workers(self):\n for _ in range(self._num_workers):\n self._start_worker(self._next_worker_id())\n\n def start_parameter_servers(self):\n for _ in range(self._num_ps):\n self._start_ps(self._next_ps_id())\n\n def _remove_worker(self, worker_id):\n logger.info(\"Removing worker: %d\", worker_id)\n with self._lock:\n if worker_id not in self._worker_pods_phase:\n logger.error(\"Unknown worker id: %s\" % worker_id)\n return\n\n # TODO: change _k8s_client to accept pod name instead of worker id.\n self._k8s_client.delete_worker(worker_id)\n\n def _remove_ps(self, ps_id):\n logger.info(\"Removing PS: %d\", ps_id)\n with self._lock:\n if ps_id not in self._ps_pods_phase:\n logger.error(\"Unknown PS id: %s\" % ps_id)\n return\n\n self._k8s_client.delete_ps(ps_id)\n\n def stop_relaunch_and_remove_workers(self):\n with self._lock:\n self._relaunch_deleted_live_worker = False\n for worker_id in self._worker_pods_phase:\n self._k8s_client.delete_worker(worker_id)\n\n def stop_relaunch_and_remove_all_ps(self):\n with self._lock:\n self._relaunch_deleted_live_ps = False\n for ps_id in self._ps_pods_phase:\n self._k8s_client.delete_ps(ps_id)\n\n def get_worker_counter(self):\n with self._lock:\n return Counter([v for _, v in self._worker_pods_phase.values()])\n\n def get_ps_counter(self):\n with self._lock:\n return Counter([v for _, v in self._ps_pods_phase.values()])\n\n def _event_cb(self, event):\n evt_obj = event.get(\"object\")\n evt_type = event.get(\"type\")\n if not evt_obj or not evt_type:\n logger.error(\"Event doesn't have object or type: %s\" % event)\n return\n\n if evt_obj.kind != \"Pod\":\n # We only care about pod related 
events\n return\n\n pod_name = evt_obj.metadata.name\n phase = evt_obj.status.phase\n logger.info(\n \"Got event %s, phase %s for pod: %s\" % (evt_type, phase, pod_name)\n )\n if pod_name == self._k8s_client.get_master_pod_name():\n # No need to care about master pod\n return\n\n relaunch_worker = False\n relaunch_ps = False\n worker_id = None\n ps_id = None\n with self._lock:\n if pod_name in self._worker_pod_name_to_id:\n worker_id = self._worker_pod_name_to_id.get(pod_name)\n self._worker_pods_phase[worker_id] = (pod_name, phase)\n if evt_type == \"DELETED\":\n del self._worker_pods_phase[worker_id]\n del self._worker_pod_name_to_id[pod_name]\n self._task_d.recover_tasks(worker_id)\n\n # If a deleted pod was not \"Succeeded\", relaunch a worker.\n relaunch_worker = (\n self._relaunch_deleted_live_worker\n and phase != \"Succeeded\"\n )\n\n elif pod_name in self._ps_pod_name_to_id:\n ps_id = self._ps_pod_name_to_id.get(pod_name)\n self._ps_pods_phase[ps_id] = (pod_name, phase)\n if evt_type == \"DELETED\":\n del self._ps_pods_phase[ps_id]\n del self._ps_pod_name_to_id[pod_name]\n relaunch_ps = self._relaunch_deleted_live_ps\n else:\n logger.error(\"Unknown pod name: %s\" % pod_name)\n return\n\n if relaunch_worker and worker_id:\n logger.info(\"Relaunching worker.\")\n new_worker_id = self._next_worker_id()\n self._start_worker(new_worker_id)\n self._update_addr(\n worker_id,\n new_worker_id,\n self._worker_addrs,\n addr_get_fn=self._k8s_client.get_worker_service_address,\n )\n elif relaunch_ps and ps_id:\n logger.info(\"Relaunching ps.\")\n new_ps_id = self._next_ps_id()\n self._start_ps(new_ps_id)\n self._update_addr(\n ps_id,\n new_ps_id,\n self._ps_addrs,\n addr_get_fn=self._k8s_client.get_ps_service_address,\n )\n\n @property\n def ps_addrs(self):\n return self._ps_addrs\n", "path": "elasticdl/python/master/k8s_instance_manager.py"}]} | 3,814 | 598 |
gh_patches_debug_27583 | rasdani/github-patches | git_diff | arviz-devs__arviz-1988 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plot_density has no filter_vars argument
I think it should have the argument. It may be a little more complicated given the multiple idata input but is should still be feasible.
</issue>
<code>
[start of arviz/plots/densityplot.py]
1 """KDE and histogram plots for multiple variables."""
2 import warnings
3
4 from ..data import convert_to_dataset
5 from ..labels import BaseLabeller
6 from ..sel_utils import (
7 xarray_var_iter,
8 )
9 from ..rcparams import rcParams
10 from ..utils import _var_names
11 from .plot_utils import default_grid, get_plotting_function
12
13
14 # pylint:disable-msg=too-many-function-args
15 def plot_density(
16 data,
17 group="posterior",
18 data_labels=None,
19 var_names=None,
20 transform=None,
21 hdi_prob=None,
22 point_estimate="auto",
23 colors="cycle",
24 outline=True,
25 hdi_markers="",
26 shade=0.0,
27 bw="default",
28 circular=False,
29 grid=None,
30 figsize=None,
31 textsize=None,
32 labeller=None,
33 ax=None,
34 backend=None,
35 backend_kwargs=None,
36 show=None,
37 ):
38 """Generate KDE plots for continuous variables and histograms for discrete ones.
39
40 Plots are truncated at their 100*(1-alpha)% highest density intervals. Plots are grouped per
41 variable and colors assigned to models.
42
43 Parameters
44 ----------
45 data : Union[Object, Iterator[Object]]
46 Any object that can be converted to an :class:`arviz.InferenceData` object, or an Iterator
47 returning a sequence of such objects.
48 Refer to documentation of :func:`arviz.convert_to_dataset` for details about such objects.
49 group: Optional[str]
50 Specifies which :class:`arviz.InferenceData` group should be plotted.
51 Defaults to 'posterior'.
52 Alternative values include 'prior' and any other strings used as dataset keys in the
53 :class:`arviz.InferenceData`.
54 data_labels : Optional[List[str]]
55 List with names for the datasets passed as "data." Useful when plotting more than one
56 dataset. Must be the same shape as the data parameter. Defaults to None.
57 var_names: Optional[List[str]]
58 List of variables to plot. If multiple datasets are supplied and var_names is not None,
59 will print the same set of variables for each dataset. Defaults to None, which results in
60 all the variables being plotted.
61 transform : callable
62 Function to transform data (defaults to None i.e. the identity function)
63 hdi_prob : float
64 Probability for the highest density interval. Should be in the interval (0, 1].
65 Defaults to 0.94.
66 point_estimate : Optional[str]
67 Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.
68 Defaults to 'auto' i.e. it falls back to default set in ``rcParams``.
69 colors : Optional[Union[List[str],str]]
70 List with valid matplotlib colors, one color per model. Alternative a string can be passed.
71 If the string is `cycle`, it will automatically choose a color per model from matplotlib's
72 cycle. If a single color is passed, e.g. 'k', 'C2' or 'red' this color will be used for all
73 models. Defaults to `cycle`.
74 outline : bool
75 Use a line to draw KDEs and histograms. Default to True
76 hdi_markers : str
77 A valid `matplotlib.markers` like 'v', used to indicate the limits of the highest density
78 interval. Defaults to empty string (no marker).
79 shade : Optional[float]
80 Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1
81 (opaque). Defaults to 0.
82 bw: Optional[float or str]
83 If numeric, indicates the bandwidth and must be positive.
84 If str, indicates the method to estimate the bandwidth and must be
85 one of "scott", "silverman", "isj" or "experimental" when `circular` is False
86 and "taylor" (for now) when `circular` is True.
87 Defaults to "default" which means "experimental" when variable is not circular
88 and "taylor" when it is.
89 circular: Optional[bool]
90 If True, it interprets the values passed are from a circular variable measured in radians
91 and a circular KDE is used. Only valid for 1D KDE. Defaults to False.
92 grid : tuple
93 Number of rows and columns. Defaults to None, the rows and columns are
94 automatically inferred.
95 figsize : Optional[Tuple[int, int]]
96 Figure size. If None it will be defined automatically.
97 textsize: Optional[float]
98 Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
99 on ``figsize``.
100 labeller : labeller instance, optional
101 Class providing the method ``make_label_vert`` to generate the labels in the plot titles.
102 Read the :ref:`label_guide` for more details and usage examples.
103 ax: numpy array-like of matplotlib axes or bokeh figures, optional
104 A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
105 its own array of plot areas (and return it).
106 backend: str, optional
107 Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
108 backend_kwargs: bool, optional
109 These are kwargs specific to the backend being used, passed to
110 :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
111 For additional documentation check the plotting method of the backend.
112 show : bool, optional
113 Call backend show function.
114
115 Returns
116 -------
117 axes : matplotlib axes or bokeh figures
118
119 See Also
120 --------
121 plot_dist : Plot distribution as histogram or kernel density estimates.
122 plot_posterior : Plot Posterior densities in the style of John K. Kruschke’s book.
123
124 Examples
125 --------
126 Plot default density plot
127
128 .. plot::
129 :context: close-figs
130
131 >>> import arviz as az
132 >>> centered = az.load_arviz_data('centered_eight')
133 >>> non_centered = az.load_arviz_data('non_centered_eight')
134 >>> az.plot_density([centered, non_centered])
135
136 Plot variables in a 4x5 grid
137
138 .. plot::
139 :context: close-figs
140
141 >>> az.plot_density([centered, non_centered], grid=(4, 5))
142
143 Plot subset variables by specifying variable name exactly
144
145 .. plot::
146 :context: close-figs
147
148 >>> az.plot_density([centered, non_centered], var_names=["mu"])
149
150 Plot a specific `az.InferenceData` group
151
152 .. plot::
153 :context: close-figs
154
155 >>> az.plot_density([centered, non_centered], var_names=["mu"], group="prior")
156
157 Specify highest density interval
158
159 .. plot::
160 :context: close-figs
161
162 >>> az.plot_density([centered, non_centered], var_names=["mu"], hdi_prob=.5)
163
164 Shade plots and/or remove outlines
165
166 .. plot::
167 :context: close-figs
168
169 >>> az.plot_density([centered, non_centered], var_names=["mu"], outline=False, shade=.8)
170
171 Specify binwidth for kernel density estimation
172
173 .. plot::
174 :context: close-figs
175
176 >>> az.plot_density([centered, non_centered], var_names=["mu"], bw=.9)
177 """
178 if not isinstance(data, (list, tuple)):
179 datasets = [convert_to_dataset(data, group=group)]
180 else:
181 datasets = [convert_to_dataset(datum, group=group) for datum in data]
182
183 if transform is not None:
184 datasets = [transform(dataset) for dataset in datasets]
185
186 if labeller is None:
187 labeller = BaseLabeller()
188
189 var_names = _var_names(var_names, datasets)
190 n_data = len(datasets)
191
192 if data_labels is None:
193 if n_data > 1:
194 data_labels = [f"{idx}" for idx in range(n_data)]
195 else:
196 data_labels = [""]
197 elif len(data_labels) != n_data:
198 raise ValueError(
199 "The number of names for the models ({}) "
200 "does not match the number of models ({})".format(len(data_labels), n_data)
201 )
202
203 if hdi_prob is None:
204 hdi_prob = rcParams["stats.hdi_prob"]
205 else:
206 if not 1 >= hdi_prob > 0:
207 raise ValueError("The value of hdi_prob should be in the interval (0, 1]")
208
209 to_plot = [list(xarray_var_iter(data, var_names, combined=True)) for data in datasets]
210 all_labels = []
211 length_plotters = []
212 for plotters in to_plot:
213 length_plotters.append(len(plotters))
214 for var_name, selection, isel, _ in plotters:
215 label = labeller.make_label_vert(var_name, selection, isel)
216 if label not in all_labels:
217 all_labels.append(label)
218 length_plotters = len(all_labels)
219 max_plots = rcParams["plot.max_subplots"]
220 max_plots = length_plotters if max_plots is None else max_plots
221 if length_plotters > max_plots:
222 warnings.warn(
223 "rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "
224 "of variables to plot ({len_plotters}) in plot_density, generating only "
225 "{max_plots} plots".format(max_plots=max_plots, len_plotters=length_plotters),
226 UserWarning,
227 )
228 all_labels = all_labels[:max_plots]
229 to_plot = [
230 [
231 (var_name, selection, values)
232 for var_name, selection, isel, values in plotters
233 if labeller.make_label_vert(var_name, selection, isel) in all_labels
234 ]
235 for plotters in to_plot
236 ]
237 length_plotters = max_plots
238 rows, cols = default_grid(length_plotters, grid=grid, max_cols=3)
239
240 if bw == "default":
241 if circular:
242 bw = "taylor"
243 else:
244 bw = "experimental"
245
246 plot_density_kwargs = dict(
247 ax=ax,
248 all_labels=all_labels,
249 to_plot=to_plot,
250 colors=colors,
251 bw=bw,
252 circular=circular,
253 figsize=figsize,
254 length_plotters=length_plotters,
255 rows=rows,
256 cols=cols,
257 textsize=textsize,
258 labeller=labeller,
259 hdi_prob=hdi_prob,
260 point_estimate=point_estimate,
261 hdi_markers=hdi_markers,
262 outline=outline,
263 shade=shade,
264 n_data=n_data,
265 data_labels=data_labels,
266 backend_kwargs=backend_kwargs,
267 show=show,
268 )
269
270 if backend is None:
271 backend = rcParams["plot.backend"]
272 backend = backend.lower()
273
274 # TODO: Add backend kwargs
275 plot = get_plotting_function("plot_density", "densityplot", backend)
276 ax = plot(**plot_density_kwargs)
277 return ax
278
[end of arviz/plots/densityplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/arviz/plots/densityplot.py b/arviz/plots/densityplot.py
--- a/arviz/plots/densityplot.py
+++ b/arviz/plots/densityplot.py
@@ -17,6 +17,7 @@
group="posterior",
data_labels=None,
var_names=None,
+ filter_vars=None,
transform=None,
hdi_prob=None,
point_estimate="auto",
@@ -58,6 +59,11 @@
List of variables to plot. If multiple datasets are supplied and var_names is not None,
will print the same set of variables for each dataset. Defaults to None, which results in
all the variables being plotted.
+ filter_vars: {None, "like", "regex"}, optional, default=None
+ If `None` (default), interpret var_names as the real variables names. If "like",
+ interpret var_names as substrings of the real variables names. If "regex",
+ interpret var_names as regular expressions on the real variables names. A la
+ ``pandas.filter``.
transform : callable
Function to transform data (defaults to None i.e. the identity function)
hdi_prob : float
@@ -186,7 +192,7 @@
if labeller is None:
labeller = BaseLabeller()
- var_names = _var_names(var_names, datasets)
+ var_names = _var_names(var_names, datasets, filter_vars)
n_data = len(datasets)
if data_labels is None:
| {"golden_diff": "diff --git a/arviz/plots/densityplot.py b/arviz/plots/densityplot.py\n--- a/arviz/plots/densityplot.py\n+++ b/arviz/plots/densityplot.py\n@@ -17,6 +17,7 @@\n group=\"posterior\",\n data_labels=None,\n var_names=None,\n+ filter_vars=None,\n transform=None,\n hdi_prob=None,\n point_estimate=\"auto\",\n@@ -58,6 +59,11 @@\n List of variables to plot. If multiple datasets are supplied and var_names is not None,\n will print the same set of variables for each dataset. Defaults to None, which results in\n all the variables being plotted.\n+ filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n+ If `None` (default), interpret var_names as the real variables names. If \"like\",\n+ interpret var_names as substrings of the real variables names. If \"regex\",\n+ interpret var_names as regular expressions on the real variables names. A la\n+ ``pandas.filter``.\n transform : callable\n Function to transform data (defaults to None i.e. the identity function)\n hdi_prob : float\n@@ -186,7 +192,7 @@\n if labeller is None:\n labeller = BaseLabeller()\n \n- var_names = _var_names(var_names, datasets)\n+ var_names = _var_names(var_names, datasets, filter_vars)\n n_data = len(datasets)\n \n if data_labels is None:\n", "issue": "plot_density has no filter_vars argument\nI think it should have the argument. It may be a little more complicated given the multiple idata input but is should still be feasible.\r\n\n", "before_files": [{"content": "\"\"\"KDE and histogram plots for multiple variables.\"\"\"\nimport warnings\n\nfrom ..data import convert_to_dataset\nfrom ..labels import BaseLabeller\nfrom ..sel_utils import (\n xarray_var_iter,\n)\nfrom ..rcparams import rcParams\nfrom ..utils import _var_names\nfrom .plot_utils import default_grid, get_plotting_function\n\n\n# pylint:disable-msg=too-many-function-args\ndef plot_density(\n data,\n group=\"posterior\",\n data_labels=None,\n var_names=None,\n transform=None,\n hdi_prob=None,\n point_estimate=\"auto\",\n colors=\"cycle\",\n outline=True,\n hdi_markers=\"\",\n shade=0.0,\n bw=\"default\",\n circular=False,\n grid=None,\n figsize=None,\n textsize=None,\n labeller=None,\n ax=None,\n backend=None,\n backend_kwargs=None,\n show=None,\n):\n \"\"\"Generate KDE plots for continuous variables and histograms for discrete ones.\n\n Plots are truncated at their 100*(1-alpha)% highest density intervals. Plots are grouped per\n variable and colors assigned to models.\n\n Parameters\n ----------\n data : Union[Object, Iterator[Object]]\n Any object that can be converted to an :class:`arviz.InferenceData` object, or an Iterator\n returning a sequence of such objects.\n Refer to documentation of :func:`arviz.convert_to_dataset` for details about such objects.\n group: Optional[str]\n Specifies which :class:`arviz.InferenceData` group should be plotted.\n Defaults to 'posterior'.\n Alternative values include 'prior' and any other strings used as dataset keys in the\n :class:`arviz.InferenceData`.\n data_labels : Optional[List[str]]\n List with names for the datasets passed as \"data.\" Useful when plotting more than one\n dataset. Must be the same shape as the data parameter. Defaults to None.\n var_names: Optional[List[str]]\n List of variables to plot. If multiple datasets are supplied and var_names is not None,\n will print the same set of variables for each dataset. Defaults to None, which results in\n all the variables being plotted.\n transform : callable\n Function to transform data (defaults to None i.e. 
the identity function)\n hdi_prob : float\n Probability for the highest density interval. Should be in the interval (0, 1].\n Defaults to 0.94.\n point_estimate : Optional[str]\n Plot point estimate per variable. Values should be 'mean', 'median', 'mode' or None.\n Defaults to 'auto' i.e. it falls back to default set in ``rcParams``.\n colors : Optional[Union[List[str],str]]\n List with valid matplotlib colors, one color per model. Alternative a string can be passed.\n If the string is `cycle`, it will automatically choose a color per model from matplotlib's\n cycle. If a single color is passed, e.g. 'k', 'C2' or 'red' this color will be used for all\n models. Defaults to `cycle`.\n outline : bool\n Use a line to draw KDEs and histograms. Default to True\n hdi_markers : str\n A valid `matplotlib.markers` like 'v', used to indicate the limits of the highest density\n interval. Defaults to empty string (no marker).\n shade : Optional[float]\n Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1\n (opaque). Defaults to 0.\n bw: Optional[float or str]\n If numeric, indicates the bandwidth and must be positive.\n If str, indicates the method to estimate the bandwidth and must be\n one of \"scott\", \"silverman\", \"isj\" or \"experimental\" when `circular` is False\n and \"taylor\" (for now) when `circular` is True.\n Defaults to \"default\" which means \"experimental\" when variable is not circular\n and \"taylor\" when it is.\n circular: Optional[bool]\n If True, it interprets the values passed are from a circular variable measured in radians\n and a circular KDE is used. Only valid for 1D KDE. Defaults to False.\n grid : tuple\n Number of rows and columns. Defaults to None, the rows and columns are\n automatically inferred.\n figsize : Optional[Tuple[int, int]]\n Figure size. If None it will be defined automatically.\n textsize: Optional[float]\n Text size scaling factor for labels, titles and lines. If None it will be autoscaled based\n on ``figsize``.\n labeller : labeller instance, optional\n Class providing the method ``make_label_vert`` to generate the labels in the plot titles.\n Read the :ref:`label_guide` for more details and usage examples.\n ax: numpy array-like of matplotlib axes or bokeh figures, optional\n A 2D array of locations into which to plot the densities. If not supplied, Arviz will create\n its own array of plot areas (and return it).\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used, passed to\n :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.\n For additional documentation check the plotting method of the backend.\n show : bool, optional\n Call backend show function.\n\n Returns\n -------\n axes : matplotlib axes or bokeh figures\n\n See Also\n --------\n plot_dist : Plot distribution as histogram or kernel density estimates.\n plot_posterior : Plot Posterior densities in the style of John K. Kruschke\u2019s book.\n\n Examples\n --------\n Plot default density plot\n\n .. plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> centered = az.load_arviz_data('centered_eight')\n >>> non_centered = az.load_arviz_data('non_centered_eight')\n >>> az.plot_density([centered, non_centered])\n\n Plot variables in a 4x5 grid\n\n .. 
plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], grid=(4, 5))\n\n Plot subset variables by specifying variable name exactly\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"])\n\n Plot a specific `az.InferenceData` group\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], group=\"prior\")\n\n Specify highest density interval\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], hdi_prob=.5)\n\n Shade plots and/or remove outlines\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], outline=False, shade=.8)\n\n Specify binwidth for kernel density estimation\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_density([centered, non_centered], var_names=[\"mu\"], bw=.9)\n \"\"\"\n if not isinstance(data, (list, tuple)):\n datasets = [convert_to_dataset(data, group=group)]\n else:\n datasets = [convert_to_dataset(datum, group=group) for datum in data]\n\n if transform is not None:\n datasets = [transform(dataset) for dataset in datasets]\n\n if labeller is None:\n labeller = BaseLabeller()\n\n var_names = _var_names(var_names, datasets)\n n_data = len(datasets)\n\n if data_labels is None:\n if n_data > 1:\n data_labels = [f\"{idx}\" for idx in range(n_data)]\n else:\n data_labels = [\"\"]\n elif len(data_labels) != n_data:\n raise ValueError(\n \"The number of names for the models ({}) \"\n \"does not match the number of models ({})\".format(len(data_labels), n_data)\n )\n\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n else:\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n\n to_plot = [list(xarray_var_iter(data, var_names, combined=True)) for data in datasets]\n all_labels = []\n length_plotters = []\n for plotters in to_plot:\n length_plotters.append(len(plotters))\n for var_name, selection, isel, _ in plotters:\n label = labeller.make_label_vert(var_name, selection, isel)\n if label not in all_labels:\n all_labels.append(label)\n length_plotters = len(all_labels)\n max_plots = rcParams[\"plot.max_subplots\"]\n max_plots = length_plotters if max_plots is None else max_plots\n if length_plotters > max_plots:\n warnings.warn(\n \"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number \"\n \"of variables to plot ({len_plotters}) in plot_density, generating only \"\n \"{max_plots} plots\".format(max_plots=max_plots, len_plotters=length_plotters),\n UserWarning,\n )\n all_labels = all_labels[:max_plots]\n to_plot = [\n [\n (var_name, selection, values)\n for var_name, selection, isel, values in plotters\n if labeller.make_label_vert(var_name, selection, isel) in all_labels\n ]\n for plotters in to_plot\n ]\n length_plotters = max_plots\n rows, cols = default_grid(length_plotters, grid=grid, max_cols=3)\n\n if bw == \"default\":\n if circular:\n bw = \"taylor\"\n else:\n bw = \"experimental\"\n\n plot_density_kwargs = dict(\n ax=ax,\n all_labels=all_labels,\n to_plot=to_plot,\n colors=colors,\n bw=bw,\n circular=circular,\n figsize=figsize,\n length_plotters=length_plotters,\n rows=rows,\n cols=cols,\n textsize=textsize,\n labeller=labeller,\n hdi_prob=hdi_prob,\n point_estimate=point_estimate,\n hdi_markers=hdi_markers,\n outline=outline,\n shade=shade,\n n_data=n_data,\n data_labels=data_labels,\n backend_kwargs=backend_kwargs,\n show=show,\n )\n\n if backend 
is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_density\", \"densityplot\", backend)\n ax = plot(**plot_density_kwargs)\n return ax\n", "path": "arviz/plots/densityplot.py"}]} | 3,716 | 342 |
gh_patches_debug_38927 | rasdani/github-patches | git_diff | ansible-collections__community.general-7357 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cargo fails to find standard homedir path for cargo binary
### Summary
the cargo module fails with -
`TASK [Install tokei Rust package] ***************************************************************************************************************************
fatal: [hostname]: FAILED! => {"changed": false, "msg": "Failed to find required executable \"cargo\" in paths: /usr/local/bin:/usr/bin:/bin:/usr/games:/sbin:/usr/sbin:/usr/local/sbin"}`
cargo executable is located in default rustup install location `/home/username/.cargo/bin/`
### Issue Type
Bug Report
### Component Name
cargo
### Ansible Version
```console (paste below)
$ ansible --version
ansible [core 2.12.3]
config file = /home/username/foo/ansible.cfg
configured module search path = ['/home/username/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/lib/python3/dist-packages/ansible
ansible collection location = /home/username/.ansible/collections:/usr/share/ansible/collections
executable location = /usr/bin/ansible
python version = 3.9.10 (main, Feb 22 2022, 13:54:07) [GCC 11.2.0]
jinja version = 3.0.3
libyaml = True
```
### Community.general Version
```console (paste below)
$ ansible-galaxy collection list community.general
# /usr/lib/python3/dist-packages/ansible_collections
Collection Version
----------------- -------
community.general 4.5.0
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
DEFAULT_HOST_LIST(/home/username/foo/ansible.cfg) = ['/home/username/foo/HOSTS']
```
### OS / Environment
Debian Bookworm targeting Bookworm
### Steps to Reproduce
<!--- Paste example playbooks or commands between quotes below -->
```yaml (paste below)
```
Run this module against home directory with the default rustup install location.
### Expected Results
cargo installs the package
### Actual Results
```console (paste below)
TASK [Install tokei Rust package] ***************************************************************************************************************************
fatal: [hostname]: FAILED! => {"changed": false, "msg": "Failed to find required executable \"cargo\" in paths: /usr/local/bin:/usr/bin:/bin:/usr/games:/sbin:/usr/sbin:/usr/local/sbin"}
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
</issue>
<code>
[start of plugins/modules/cargo.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3 # Copyright (c) 2021 Radek Sprta <[email protected]>
4 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
5 # SPDX-License-Identifier: GPL-3.0-or-later
6
7 from __future__ import absolute_import, division, print_function
8
9 __metaclass__ = type
10
11
12 DOCUMENTATION = r"""
13 ---
14 module: cargo
15 short_description: Manage Rust packages with cargo
16 version_added: 4.3.0
17 description:
18 - Manage Rust packages with cargo.
19 author: "Radek Sprta (@radek-sprta)"
20 extends_documentation_fragment:
21 - community.general.attributes
22 attributes:
23 check_mode:
24 support: full
25 diff_mode:
26 support: none
27 options:
28 name:
29 description:
30 - The name of a Rust package to install.
31 type: list
32 elements: str
33 required: true
34 path:
35 description:
36 ->
37 The base path where to install the Rust packages. Cargo automatically appends
38 V(/bin). In other words, V(/usr/local) will become V(/usr/local/bin).
39 type: path
40 version:
41 description:
42 ->
43 The version to install. If O(name) contains multiple values, the module will
44 try to install all of them in this version.
45 type: str
46 required: false
47 locked:
48 description:
49 - Install with locked dependencies.
50 - This is only used when installing packages.
51 required: false
52 type: bool
53 default: false
54 version_added: 7.5.0
55 state:
56 description:
57 - The state of the Rust package.
58 required: false
59 type: str
60 default: present
61 choices: [ "present", "absent", "latest" ]
62 requirements:
63 - cargo installed in bin path (recommended /usr/local/bin)
64 """
65
66 EXAMPLES = r"""
67 - name: Install "ludusavi" Rust package
68 community.general.cargo:
69 name: ludusavi
70
71 - name: Install "ludusavi" Rust package with locked dependencies
72 community.general.cargo:
73 name: ludusavi
74 locked: true
75
76 - name: Install "ludusavi" Rust package in version 0.10.0
77 community.general.cargo:
78 name: ludusavi
79 version: '0.10.0'
80
81 - name: Install "ludusavi" Rust package to global location
82 community.general.cargo:
83 name: ludusavi
84 path: /usr/local
85
86 - name: Remove "ludusavi" Rust package
87 community.general.cargo:
88 name: ludusavi
89 state: absent
90
91 - name: Update "ludusavi" Rust package its latest version
92 community.general.cargo:
93 name: ludusavi
94 state: latest
95 """
96
97 import os
98 import re
99
100 from ansible.module_utils.basic import AnsibleModule
101
102
103 class Cargo(object):
104 def __init__(self, module, **kwargs):
105 self.module = module
106 self.name = kwargs["name"]
107 self.path = kwargs["path"]
108 self.state = kwargs["state"]
109 self.version = kwargs["version"]
110 self.locked = kwargs["locked"]
111
112 self.executable = [module.get_bin_path("cargo", True)]
113
114 @property
115 def path(self):
116 return self._path
117
118 @path.setter
119 def path(self, path):
120 if path is not None and not os.path.isdir(path):
121 self.module.fail_json(msg="Path %s is not a directory" % path)
122 self._path = path
123
124 def _exec(
125 self, args, run_in_check_mode=False, check_rc=True, add_package_name=True
126 ):
127 if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
128 cmd = self.executable + args
129 rc, out, err = self.module.run_command(cmd, check_rc=check_rc)
130 return out, err
131 return "", ""
132
133 def get_installed(self):
134 cmd = ["install", "--list"]
135 data, dummy = self._exec(cmd, True, False, False)
136
137 package_regex = re.compile(r"^([\w\-]+) v(.+):$")
138 installed = {}
139 for line in data.splitlines():
140 package_info = package_regex.match(line)
141 if package_info:
142 installed[package_info.group(1)] = package_info.group(2)
143
144 return installed
145
146 def install(self, packages=None):
147 cmd = ["install"]
148 cmd.extend(packages or self.name)
149 if self.locked:
150 cmd.append("--locked")
151 if self.path:
152 cmd.append("--root")
153 cmd.append(self.path)
154 if self.version:
155 cmd.append("--version")
156 cmd.append(self.version)
157 return self._exec(cmd)
158
159 def is_outdated(self, name):
160 installed_version = self.get_installed().get(name)
161
162 cmd = ["search", name, "--limit", "1"]
163 data, dummy = self._exec(cmd, True, False, False)
164
165 match = re.search(r'"(.+)"', data)
166 if match:
167 latest_version = match.group(1)
168
169 return installed_version != latest_version
170
171 def uninstall(self, packages=None):
172 cmd = ["uninstall"]
173 cmd.extend(packages or self.name)
174 return self._exec(cmd)
175
176
177 def main():
178 arg_spec = dict(
179 name=dict(required=True, type="list", elements="str"),
180 path=dict(default=None, type="path"),
181 state=dict(default="present", choices=["present", "absent", "latest"]),
182 version=dict(default=None, type="str"),
183 locked=dict(default=False, type="bool"),
184 )
185 module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
186
187 name = module.params["name"]
188 path = module.params["path"]
189 state = module.params["state"]
190 version = module.params["version"]
191 locked = module.params["locked"]
192
193 if not name:
194 module.fail_json(msg="Package name must be specified")
195
196 # Set LANG env since we parse stdout
197 module.run_command_environ_update = dict(
198 LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C"
199 )
200
201 cargo = Cargo(module, name=name, path=path, state=state, version=version, locked=locked)
202 changed, out, err = False, None, None
203 installed_packages = cargo.get_installed()
204 if state == "present":
205 to_install = [
206 n
207 for n in name
208 if (n not in installed_packages)
209 or (version and version != installed_packages[n])
210 ]
211 if to_install:
212 changed = True
213 out, err = cargo.install(to_install)
214 elif state == "latest":
215 to_update = [
216 n for n in name if n not in installed_packages or cargo.is_outdated(n)
217 ]
218 if to_update:
219 changed = True
220 out, err = cargo.install(to_update)
221 else: # absent
222 to_uninstall = [n for n in name if n in installed_packages]
223 if to_uninstall:
224 changed = True
225 out, err = cargo.uninstall(to_uninstall)
226
227 module.exit_json(changed=changed, stdout=out, stderr=err)
228
229
230 if __name__ == "__main__":
231 main()
232
[end of plugins/modules/cargo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py
--- a/plugins/modules/cargo.py
+++ b/plugins/modules/cargo.py
@@ -25,6 +25,12 @@
diff_mode:
support: none
options:
+ executable:
+ description:
+ - Path to the C(cargo) installed in the system.
+ - If not specified, the module will look C(cargo) in E(PATH).
+ type: path
+ version_added: 7.5.0
name:
description:
- The name of a Rust package to install.
@@ -60,7 +66,7 @@
default: present
choices: [ "present", "absent", "latest" ]
requirements:
- - cargo installed in bin path (recommended /usr/local/bin)
+ - cargo installed
"""
EXAMPLES = r"""
@@ -103,14 +109,13 @@
class Cargo(object):
def __init__(self, module, **kwargs):
self.module = module
+ self.executable = [kwargs["executable"] or module.get_bin_path("cargo", True)]
self.name = kwargs["name"]
self.path = kwargs["path"]
self.state = kwargs["state"]
self.version = kwargs["version"]
self.locked = kwargs["locked"]
- self.executable = [module.get_bin_path("cargo", True)]
-
@property
def path(self):
return self._path
@@ -176,6 +181,7 @@
def main():
arg_spec = dict(
+ executable=dict(default=None, type="path"),
name=dict(required=True, type="list", elements="str"),
path=dict(default=None, type="path"),
state=dict(default="present", choices=["present", "absent", "latest"]),
@@ -185,10 +191,8 @@
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params["name"]
- path = module.params["path"]
state = module.params["state"]
version = module.params["version"]
- locked = module.params["locked"]
if not name:
module.fail_json(msg="Package name must be specified")
@@ -198,7 +202,7 @@
LANG="C", LC_ALL="C", LC_MESSAGES="C", LC_CTYPE="C"
)
- cargo = Cargo(module, name=name, path=path, state=state, version=version, locked=locked)
+ cargo = Cargo(module, **module.params)
changed, out, err = False, None, None
installed_packages = cargo.get_installed()
if state == "present":
| {"golden_diff": "diff --git a/plugins/modules/cargo.py b/plugins/modules/cargo.py\n--- a/plugins/modules/cargo.py\n+++ b/plugins/modules/cargo.py\n@@ -25,6 +25,12 @@\n diff_mode:\n support: none\n options:\n+ executable:\n+ description:\n+ - Path to the C(cargo) installed in the system.\n+ - If not specified, the module will look C(cargo) in E(PATH).\n+ type: path\n+ version_added: 7.5.0\n name:\n description:\n - The name of a Rust package to install.\n@@ -60,7 +66,7 @@\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\n requirements:\n- - cargo installed in bin path (recommended /usr/local/bin)\n+ - cargo installed\n \"\"\"\n \n EXAMPLES = r\"\"\"\n@@ -103,14 +109,13 @@\n class Cargo(object):\n def __init__(self, module, **kwargs):\n self.module = module\n+ self.executable = [kwargs[\"executable\"] or module.get_bin_path(\"cargo\", True)]\n self.name = kwargs[\"name\"]\n self.path = kwargs[\"path\"]\n self.state = kwargs[\"state\"]\n self.version = kwargs[\"version\"]\n self.locked = kwargs[\"locked\"]\n \n- self.executable = [module.get_bin_path(\"cargo\", True)]\n-\n @property\n def path(self):\n return self._path\n@@ -176,6 +181,7 @@\n \n def main():\n arg_spec = dict(\n+ executable=dict(default=None, type=\"path\"),\n name=dict(required=True, type=\"list\", elements=\"str\"),\n path=dict(default=None, type=\"path\"),\n state=dict(default=\"present\", choices=[\"present\", \"absent\", \"latest\"]),\n@@ -185,10 +191,8 @@\n module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)\n \n name = module.params[\"name\"]\n- path = module.params[\"path\"]\n state = module.params[\"state\"]\n version = module.params[\"version\"]\n- locked = module.params[\"locked\"]\n \n if not name:\n module.fail_json(msg=\"Package name must be specified\")\n@@ -198,7 +202,7 @@\n LANG=\"C\", LC_ALL=\"C\", LC_MESSAGES=\"C\", LC_CTYPE=\"C\"\n )\n \n- cargo = Cargo(module, name=name, path=path, state=state, version=version, locked=locked)\n+ cargo = Cargo(module, **module.params)\n changed, out, err = False, None, None\n installed_packages = cargo.get_installed()\n if state == \"present\":\n", "issue": "cargo fails to find standard homedir path for cargo binary\n### Summary\r\n\r\nthe cargo module fails with -\r\n\r\n`TASK [Install tokei Rust package] ***************************************************************************************************************************\r\nfatal: [hostname]: FAILED! 
=> {\"changed\": false, \"msg\": \"Failed to find required executable \\\"cargo\\\" in paths: /usr/local/bin:/usr/bin:/bin:/usr/games:/sbin:/usr/sbin:/usr/local/sbin\"}`\r\n\r\ncargo executable is located in default rustup install location `/home/username/.cargo/bin/` \r\n\r\n### Issue Type\r\n\r\nBug Report\r\n\r\n### Component Name\r\n\r\ncargo\r\n\r\n### Ansible Version\r\n\r\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.12.3]\r\n config file = /home/username/foo/ansible.cfg\r\n configured module search path = ['/home/username/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python3/dist-packages/ansible\r\n ansible collection location = /home/username/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /usr/bin/ansible\r\n python version = 3.9.10 (main, Feb 22 2022, 13:54:07) [GCC 11.2.0]\r\n jinja version = 3.0.3\r\n libyaml = True\r\n```\r\n\r\n\r\n### Community.general Version\r\n\r\n```console (paste below)\r\n$ ansible-galaxy collection list community.general\r\n\r\n# /usr/lib/python3/dist-packages/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 4.5.0 \r\n```\r\n\r\n\r\n### Configuration\r\n\r\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\nDEFAULT_HOST_LIST(/home/username/foo/ansible.cfg) = ['/home/username/foo/HOSTS']\r\n```\r\n\r\n\r\n### OS / Environment\r\n\r\nDebian Bookworm targeting Bookworm\r\n\r\n### Steps to Reproduce\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n\r\n```\r\nRun this module against home directory with the default rustup install location.\r\n\r\n### Expected Results\r\n\r\ncargo installs the package\r\n\r\n### Actual Results\r\n\r\n```console (paste below)\r\nTASK [Install tokei Rust package] ***************************************************************************************************************************\r\nfatal: [hostname]: FAILED! => {\"changed\": false, \"msg\": \"Failed to find required executable \\\"cargo\\\" in paths: /usr/local/bin:/usr/bin:/bin:/usr/games:/sbin:/usr/sbin:/usr/local/sbin\"}\r\n```\r\n\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2021 Radek Sprta <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nDOCUMENTATION = r\"\"\"\n---\nmodule: cargo\nshort_description: Manage Rust packages with cargo\nversion_added: 4.3.0\ndescription:\n - Manage Rust packages with cargo.\nauthor: \"Radek Sprta (@radek-sprta)\"\nextends_documentation_fragment:\n - community.general.attributes\nattributes:\n check_mode:\n support: full\n diff_mode:\n support: none\noptions:\n name:\n description:\n - The name of a Rust package to install.\n type: list\n elements: str\n required: true\n path:\n description:\n ->\n The base path where to install the Rust packages. Cargo automatically appends\n V(/bin). In other words, V(/usr/local) will become V(/usr/local/bin).\n type: path\n version:\n description:\n ->\n The version to install. 
If O(name) contains multiple values, the module will\n try to install all of them in this version.\n type: str\n required: false\n locked:\n description:\n - Install with locked dependencies.\n - This is only used when installing packages.\n required: false\n type: bool\n default: false\n version_added: 7.5.0\n state:\n description:\n - The state of the Rust package.\n required: false\n type: str\n default: present\n choices: [ \"present\", \"absent\", \"latest\" ]\nrequirements:\n - cargo installed in bin path (recommended /usr/local/bin)\n\"\"\"\n\nEXAMPLES = r\"\"\"\n- name: Install \"ludusavi\" Rust package\n community.general.cargo:\n name: ludusavi\n\n- name: Install \"ludusavi\" Rust package with locked dependencies\n community.general.cargo:\n name: ludusavi\n locked: true\n\n- name: Install \"ludusavi\" Rust package in version 0.10.0\n community.general.cargo:\n name: ludusavi\n version: '0.10.0'\n\n- name: Install \"ludusavi\" Rust package to global location\n community.general.cargo:\n name: ludusavi\n path: /usr/local\n\n- name: Remove \"ludusavi\" Rust package\n community.general.cargo:\n name: ludusavi\n state: absent\n\n- name: Update \"ludusavi\" Rust package its latest version\n community.general.cargo:\n name: ludusavi\n state: latest\n\"\"\"\n\nimport os\nimport re\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nclass Cargo(object):\n def __init__(self, module, **kwargs):\n self.module = module\n self.name = kwargs[\"name\"]\n self.path = kwargs[\"path\"]\n self.state = kwargs[\"state\"]\n self.version = kwargs[\"version\"]\n self.locked = kwargs[\"locked\"]\n\n self.executable = [module.get_bin_path(\"cargo\", True)]\n\n @property\n def path(self):\n return self._path\n\n @path.setter\n def path(self, path):\n if path is not None and not os.path.isdir(path):\n self.module.fail_json(msg=\"Path %s is not a directory\" % path)\n self._path = path\n\n def _exec(\n self, args, run_in_check_mode=False, check_rc=True, add_package_name=True\n ):\n if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):\n cmd = self.executable + args\n rc, out, err = self.module.run_command(cmd, check_rc=check_rc)\n return out, err\n return \"\", \"\"\n\n def get_installed(self):\n cmd = [\"install\", \"--list\"]\n data, dummy = self._exec(cmd, True, False, False)\n\n package_regex = re.compile(r\"^([\\w\\-]+) v(.+):$\")\n installed = {}\n for line in data.splitlines():\n package_info = package_regex.match(line)\n if package_info:\n installed[package_info.group(1)] = package_info.group(2)\n\n return installed\n\n def install(self, packages=None):\n cmd = [\"install\"]\n cmd.extend(packages or self.name)\n if self.locked:\n cmd.append(\"--locked\")\n if self.path:\n cmd.append(\"--root\")\n cmd.append(self.path)\n if self.version:\n cmd.append(\"--version\")\n cmd.append(self.version)\n return self._exec(cmd)\n\n def is_outdated(self, name):\n installed_version = self.get_installed().get(name)\n\n cmd = [\"search\", name, \"--limit\", \"1\"]\n data, dummy = self._exec(cmd, True, False, False)\n\n match = re.search(r'\"(.+)\"', data)\n if match:\n latest_version = match.group(1)\n\n return installed_version != latest_version\n\n def uninstall(self, packages=None):\n cmd = [\"uninstall\"]\n cmd.extend(packages or self.name)\n return self._exec(cmd)\n\n\ndef main():\n arg_spec = dict(\n name=dict(required=True, type=\"list\", elements=\"str\"),\n path=dict(default=None, type=\"path\"),\n state=dict(default=\"present\", choices=[\"present\", \"absent\", 
\"latest\"]),\n version=dict(default=None, type=\"str\"),\n locked=dict(default=False, type=\"bool\"),\n )\n module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)\n\n name = module.params[\"name\"]\n path = module.params[\"path\"]\n state = module.params[\"state\"]\n version = module.params[\"version\"]\n locked = module.params[\"locked\"]\n\n if not name:\n module.fail_json(msg=\"Package name must be specified\")\n\n # Set LANG env since we parse stdout\n module.run_command_environ_update = dict(\n LANG=\"C\", LC_ALL=\"C\", LC_MESSAGES=\"C\", LC_CTYPE=\"C\"\n )\n\n cargo = Cargo(module, name=name, path=path, state=state, version=version, locked=locked)\n changed, out, err = False, None, None\n installed_packages = cargo.get_installed()\n if state == \"present\":\n to_install = [\n n\n for n in name\n if (n not in installed_packages)\n or (version and version != installed_packages[n])\n ]\n if to_install:\n changed = True\n out, err = cargo.install(to_install)\n elif state == \"latest\":\n to_update = [\n n for n in name if n not in installed_packages or cargo.is_outdated(n)\n ]\n if to_update:\n changed = True\n out, err = cargo.install(to_update)\n else: # absent\n to_uninstall = [n for n in name if n in installed_packages]\n if to_uninstall:\n changed = True\n out, err = cargo.uninstall(to_uninstall)\n\n module.exit_json(changed=changed, stdout=out, stderr=err)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/cargo.py"}]} | 3,311 | 600 |
gh_patches_debug_3114 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-108 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing zmq and jsonschema
It seems like two additional dependencies are missing.
``` bash
Traceback (most recent call last):
File "/home/stanleygu/.virtualenvs/localpy/src/ipython/IPython/utils/zmqrelated.py", line 35, in check_for_zmq
import zmq
ImportError: No module named 'zmq'
```
``` bash
Traceback (most recent call last):
File "/home/stanleygu/.virtualenvs/localpy/src/ipython/IPython/nbformat/validator.py", line 10, in <module>
from jsonschema import ValidationError
ImportError: No module named 'jsonschema'
```
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # coding: utf-8
3
4 # Copyright (c) Juptyer Development Team.
5 # Distributed under the terms of the Modified BSD License.
6
7 #-----------------------------------------------------------------------------
8 # Minimal Python version sanity check (from IPython)
9 #-----------------------------------------------------------------------------
10
11 from __future__ import print_function
12
13 import os
14 import sys
15
16 v = sys.version_info
17 if v[:2] < (3,3):
18 error = "ERROR: Jupyter Hub requires Python version 3.3 or above."
19 print(error, file=sys.stderr)
20 sys.exit(1)
21
22
23 if os.name in ('nt', 'dos'):
24 error = "ERROR: Windows is not supported"
25 print(error, file=sys.stderr)
26
27 # At least we're on the python version we need, move on.
28
29 import os
30
31 from glob import glob
32
33 from distutils.core import setup
34 from subprocess import check_call
35
36 pjoin = os.path.join
37
38 here = os.path.abspath(os.path.dirname(__file__))
39 share_jupyter = pjoin(here, 'share', 'jupyter')
40 static = pjoin(share_jupyter, 'static')
41
42 #---------------------------------------------------------------------------
43 # Build basic package data, etc.
44 #---------------------------------------------------------------------------
45
46 def get_data_files():
47 """Get data files in share/jupyter"""
48
49 data_files = []
50 ntrim = len(here) + 1
51
52 for (d, dirs, filenames) in os.walk(share_jupyter):
53 data_files.append((
54 d[ntrim:],
55 [ pjoin(d, f) for f in filenames ]
56 ))
57 return data_files
58
59
60 ns = {}
61 with open(pjoin(here, 'jupyterhub', 'version.py')) as f:
62 exec(f.read(), {}, ns)
63
64
65 packages = []
66 for d, _, _ in os.walk('jupyterhub'):
67 if os.path.exists(pjoin(d, '__init__.py')):
68 packages.append(d.replace(os.path.sep, '.'))
69
70 setup_args = dict(
71 name = 'jupyterhub',
72 scripts = glob(pjoin('scripts', '*')),
73 packages = packages,
74 # dummy, so that install_data doesn't get skipped
75 # this will be overridden when bower is run anyway
76 data_files = get_data_files() or ['dummy'],
77 version = ns['__version__'],
78 description = """JupyterHub: A multi-user server for Jupyter notebooks""",
79 long_description = "",
80 author = "Jupyter Development Team",
81 author_email = "[email protected]",
82 url = "http://jupyter.org",
83 license = "BSD",
84 platforms = "Linux, Mac OS X",
85 keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],
86 classifiers = [
87 'Intended Audience :: Developers',
88 'Intended Audience :: System Administrators',
89 'Intended Audience :: Science/Research',
90 'License :: OSI Approved :: BSD License',
91 'Programming Language :: Python',
92 'Programming Language :: Python :: 3',
93 ],
94 )
95
96 #---------------------------------------------------------------------------
97 # custom distutils commands
98 #---------------------------------------------------------------------------
99
100 # imports here, so they are after setuptools import if there was one
101 from distutils.cmd import Command
102 from distutils.command.install import install
103
104 class BaseCommand(Command):
105 """Dumb empty command because Command needs subclasses to override too much"""
106 user_options = []
107
108 def initialize_options(self):
109 pass
110
111 def finalize_options(self):
112 pass
113
114 def get_inputs(self):
115 return []
116
117 def get_outputs(self):
118 return []
119
120
121 class Bower(BaseCommand):
122 description = "fetch static client-side components with bower"
123
124 user_options = []
125
126 def run(self):
127 try:
128 check_call(['bower', 'install', '--allow-root'])
129 except OSError as e:
130 print("Failed to run bower: %s" % e, file=sys.stderr)
131 print("You can install bower with `npm install -g bower`", file=sys.stderr)
132 raise
133 # update data-files in case this created new files
134 self.distribution.data_files = get_data_files()
135
136 class CSS(BaseCommand):
137 description = "compile CSS from LESS"
138
139 user_options = []
140
141 def initialize_options(self):
142 pass
143
144 def finalize_options(self):
145 pass
146
147 def run(self):
148 style_less = pjoin(static, 'less', 'style.less')
149 style_css = pjoin(static, 'css', 'style.min.css')
150 sourcemap = style_css + '.map'
151 try:
152 check_call([
153 'lessc', '-x', '--verbose',
154 '--source-map-basepath={}'.format(static),
155 '--source-map={}'.format(sourcemap),
156 '--source-map-rootpath=../',
157 style_less, style_css,
158 ])
159 except OSError as e:
160 print("Failed to run lessc: %s" % e, file=sys.stderr)
161 print("You can install less with `npm install -g less`", file=sys.stderr)
162 raise
163 # update data-files in case this created new files
164 self.distribution.data_files = get_data_files()
165
166 # ensure bower is run as part of install
167 install.sub_commands.insert(0, ('js', None))
168 install.sub_commands.insert(1, ('css', None))
169
170 setup_args['cmdclass'] = {
171 'js': Bower,
172 'css': CSS,
173 }
174
175
176 # setuptools requirements
177
178 if 'setuptools' in sys.modules:
179 setup_args['zip_safe'] = False
180 from setuptools.command.develop import develop
181 class develop_js_css(develop):
182 def run(self):
183 if not self.uninstall:
184 self.distribution.run_command('js')
185 self.distribution.run_command('css')
186 develop.run(self)
187 setup_args['cmdclass']['develop'] = develop_js_css
188 setup_args['install_requires'] = install_requires = []
189
190 with open('requirements.txt') as f:
191 for line in f.readlines():
192 req = line.strip()
193 if not req or req.startswith(('-e', '#')):
194 continue
195 install_requires.append(req)
196
197 #---------------------------------------------------------------------------
198 # setup
199 #---------------------------------------------------------------------------
200
201 def main():
202 setup(**setup_args)
203
204 if __name__ == '__main__':
205 main()
206
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -190,7 +190,7 @@
with open('requirements.txt') as f:
for line in f.readlines():
req = line.strip()
- if not req or req.startswith(('-e', '#')):
+ if not req or req.startswith('#') or '://' in req:
continue
install_requires.append(req)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -190,7 +190,7 @@\n with open('requirements.txt') as f:\n for line in f.readlines():\n req = line.strip()\n- if not req or req.startswith(('-e', '#')):\n+ if not req or req.startswith('#') or '://' in req:\n continue\n install_requires.append(req)\n", "issue": "Missing zmq and jsonschema\nIt seems like two additional dependencies are missing.\n\n``` bash\nTraceback (most recent call last):\n File \"/home/stanleygu/.virtualenvs/localpy/src/ipython/IPython/utils/zmqrelated.py\", line 35, in check_for_zmq\n import zmq\nImportError: No module named 'zmq'\n```\n\n``` bash\nTraceback (most recent call last):\n File \"/home/stanleygu/.virtualenvs/localpy/src/ipython/IPython/nbformat/validator.py\", line 10, in <module>\n from jsonschema import ValidationError\nImportError: No module named 'jsonschema'\n\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding: utf-8\n\n# Copyright (c) Juptyer Development Team.\n# Distributed under the terms of the Modified BSD License.\n\n#-----------------------------------------------------------------------------\n# Minimal Python version sanity check (from IPython)\n#-----------------------------------------------------------------------------\n\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nv = sys.version_info\nif v[:2] < (3,3):\n error = \"ERROR: Jupyter Hub requires Python version 3.3 or above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\nif os.name in ('nt', 'dos'):\n error = \"ERROR: Windows is not supported\"\n print(error, file=sys.stderr)\n\n# At least we're on the python version we need, move on.\n\nimport os\n\nfrom glob import glob\n\nfrom distutils.core import setup\nfrom subprocess import check_call\n\npjoin = os.path.join\n\nhere = os.path.abspath(os.path.dirname(__file__))\nshare_jupyter = pjoin(here, 'share', 'jupyter')\nstatic = pjoin(share_jupyter, 'static')\n\n#---------------------------------------------------------------------------\n# Build basic package data, etc.\n#---------------------------------------------------------------------------\n\ndef get_data_files():\n \"\"\"Get data files in share/jupyter\"\"\"\n \n data_files = []\n ntrim = len(here) + 1\n \n for (d, dirs, filenames) in os.walk(share_jupyter):\n data_files.append((\n d[ntrim:],\n [ pjoin(d, f) for f in filenames ]\n ))\n return data_files\n\n\nns = {}\nwith open(pjoin(here, 'jupyterhub', 'version.py')) as f:\n exec(f.read(), {}, ns)\n\n\npackages = []\nfor d, _, _ in os.walk('jupyterhub'):\n if os.path.exists(pjoin(d, '__init__.py')):\n packages.append(d.replace(os.path.sep, '.'))\n\nsetup_args = dict(\n name = 'jupyterhub',\n scripts = glob(pjoin('scripts', '*')),\n packages = packages,\n # dummy, so that install_data doesn't get skipped\n # this will be overridden when bower is run anyway\n data_files = get_data_files() or ['dummy'],\n version = ns['__version__'],\n description = \"\"\"JupyterHub: A multi-user server for Jupyter notebooks\"\"\",\n long_description = \"\",\n author = \"Jupyter Development Team\",\n author_email = \"[email protected]\",\n url = \"http://jupyter.org\",\n license = \"BSD\",\n platforms = \"Linux, Mac OS X\",\n keywords = ['Interactive', 'Interpreter', 'Shell', 'Web'],\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming 
Language :: Python :: 3',\n ],\n)\n\n#---------------------------------------------------------------------------\n# custom distutils commands\n#---------------------------------------------------------------------------\n\n# imports here, so they are after setuptools import if there was one\nfrom distutils.cmd import Command\nfrom distutils.command.install import install\n\nclass BaseCommand(Command):\n \"\"\"Dumb empty command because Command needs subclasses to override too much\"\"\"\n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n pass\n \n def get_inputs(self):\n return []\n \n def get_outputs(self):\n return []\n\n\nclass Bower(BaseCommand):\n description = \"fetch static client-side components with bower\"\n \n user_options = []\n \n def run(self):\n try:\n check_call(['bower', 'install', '--allow-root'])\n except OSError as e:\n print(\"Failed to run bower: %s\" % e, file=sys.stderr)\n print(\"You can install bower with `npm install -g bower`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\nclass CSS(BaseCommand):\n description = \"compile CSS from LESS\"\n \n user_options = []\n \n def initialize_options(self):\n pass\n \n def finalize_options(self):\n pass\n \n def run(self):\n style_less = pjoin(static, 'less', 'style.less')\n style_css = pjoin(static, 'css', 'style.min.css')\n sourcemap = style_css + '.map'\n try:\n check_call([\n 'lessc', '-x', '--verbose',\n '--source-map-basepath={}'.format(static),\n '--source-map={}'.format(sourcemap),\n '--source-map-rootpath=../',\n style_less, style_css,\n ])\n except OSError as e:\n print(\"Failed to run lessc: %s\" % e, file=sys.stderr)\n print(\"You can install less with `npm install -g less`\", file=sys.stderr)\n raise\n # update data-files in case this created new files\n self.distribution.data_files = get_data_files()\n\n# ensure bower is run as part of install\ninstall.sub_commands.insert(0, ('js', None))\ninstall.sub_commands.insert(1, ('css', None))\n\nsetup_args['cmdclass'] = {\n 'js': Bower,\n 'css': CSS,\n}\n\n\n# setuptools requirements\n\nif 'setuptools' in sys.modules:\n setup_args['zip_safe'] = False\n from setuptools.command.develop import develop\n class develop_js_css(develop):\n def run(self):\n if not self.uninstall:\n self.distribution.run_command('js')\n self.distribution.run_command('css')\n develop.run(self)\n setup_args['cmdclass']['develop'] = develop_js_css\n setup_args['install_requires'] = install_requires = []\n\n with open('requirements.txt') as f:\n for line in f.readlines():\n req = line.strip()\n if not req or req.startswith(('-e', '#')):\n continue\n install_requires.append(req)\n\n#---------------------------------------------------------------------------\n# setup\n#---------------------------------------------------------------------------\n\ndef main():\n setup(**setup_args)\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}]} | 2,527 | 96 |
gh_patches_debug_13945 | rasdani/github-patches | git_diff | optuna__optuna-1007 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Order dependencies in `setup.py` alphabetically.
The libraries in `setup.py` are not arranged in alphabetical order. I think it would be helpful for developers if we fix it.
Examples:
https://github.com/optuna/optuna/blob/master/setup.py#L30-L41
```python
def get_install_requires() -> List[str]:
return [
'alembic',
'cliff',
'colorlog',
'numpy',
'scipy!=1.4.0',
'sqlalchemy>=1.1.0',
'tqdm',
'joblib',
]
```
https://github.com/optuna/optuna/blob/master/setup.py#L61-L66
```python
'doctest': [
'pandas',
'cma',
'scikit-learn>=0.19.0',
'plotly>=4.0.0',
],
```
</issue>
<code>
[start of setup.py]
1 import os
2 import sys
3
4 import pkg_resources
5 from setuptools import find_packages
6 from setuptools import setup
7
8 from typing import Dict
9 from typing import List
10 from typing import Optional
11
12
13 def get_version() -> str:
14
15 version_filepath = os.path.join(os.path.dirname(__file__), 'optuna', 'version.py')
16 with open(version_filepath) as f:
17 for line in f:
18 if line.startswith('__version__'):
19 return line.strip().split()[-1][1:-1]
20 assert False
21
22
23 def get_long_description() -> str:
24
25 readme_filepath = os.path.join(os.path.dirname(__file__), 'README.md')
26 with open(readme_filepath) as f:
27 return f.read()
28
29
30 def get_install_requires() -> List[str]:
31
32 return [
33 'alembic',
34 'cliff',
35 'colorlog',
36 'numpy',
37 'scipy!=1.4.0',
38 'sqlalchemy>=1.1.0',
39 'tqdm',
40 'joblib',
41 ]
42
43
44 def get_tests_require() -> List[str]:
45
46 return get_extras_require()['testing']
47
48
49 def get_extras_require() -> Dict[str, List[str]]:
50
51 requirements = {
52 'checking': [
53 'autopep8',
54 'hacking',
55 'mypy',
56 ],
57 'codecov': [
58 'codecov',
59 'pytest-cov',
60 ],
61 'doctest': [
62 'pandas',
63 'cma',
64 'scikit-learn>=0.19.0',
65 'plotly>=4.0.0',
66 'scikit-optimize',
67 ],
68 'document': [
69 'sphinx',
70 'sphinx_rtd_theme',
71 ],
72 'example': [
73 'catboost',
74 'chainer',
75 'lightgbm',
76 'mlflow',
77 'mxnet',
78 'pytorch-ignite',
79 'scikit-image',
80 'scikit-learn',
81 'torch',
82 'torchvision>=0.5.0',
83 'xgboost',
84 ] + (['fastai<2'] if (3, 5) < sys.version_info[:2] < (3, 8) else [])
85 + ([
86 'dask[dataframe]',
87 'dask-ml',
88 'keras',
89 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
90 # https://github.com/optuna/optuna/issues/997.
91 'pytorch-lightning<0.7.0',
92 'tensorflow>=2.0.0',
93 ] if sys.version_info[:2] < (3, 8) else []),
94 'testing': [
95 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
96 # https://github.com/optuna/optuna/issues/1000.
97 'bokeh<2.0.0',
98 'chainer>=5.0.0',
99 'cma',
100 'lightgbm',
101 'mock',
102 'mpi4py',
103 'mxnet',
104 'pandas',
105 'plotly>=4.0.0',
106 'pytest',
107 'pytorch-ignite',
108 'scikit-learn>=0.19.0',
109 'scikit-optimize',
110 'torch',
111 'torchvision>=0.5.0',
112 'xgboost',
113 ] + (['fastai<2'] if (3, 5) < sys.version_info[:2] < (3, 8) else [])
114 + ([
115 'keras',
116 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
117 # https://github.com/optuna/optuna/issues/997.
118 'pytorch-lightning<0.7.0',
119 'tensorflow',
120 'tensorflow-datasets',
121 ] if sys.version_info[:2] < (3, 8) else []),
122 }
123
124 return requirements
125
126
127 def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
128
129 for pkg in pkgs:
130 try:
131 return pkg_resources.get_distribution(pkg)
132 except pkg_resources.DistributionNotFound:
133 pass
134 return None
135
136
137 pfnopt_pkg = find_any_distribution(['pfnopt'])
138 if pfnopt_pkg is not None:
139 msg = 'We detected that PFNOpt is installed in your environment.\n' \
140 'PFNOpt has been renamed Optuna. Please uninstall the old\n' \
141 'PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`).'
142 print(msg)
143 exit(1)
144
145 setup(
146 name='optuna',
147 version=get_version(),
148 description='A hyperparameter optimization framework',
149 long_description=get_long_description(),
150 long_description_content_type='text/markdown',
151 author='Takuya Akiba',
152 author_email='[email protected]',
153 url='https://optuna.org/',
154 packages=find_packages(),
155 package_data={
156 'optuna': [
157 'storages/rdb/alembic.ini',
158 'storages/rdb/alembic/*.*',
159 'storages/rdb/alembic/versions/*.*'
160 ]
161 },
162 install_requires=get_install_requires(),
163 tests_require=get_tests_require(),
164 extras_require=get_extras_require(),
165 entry_points={'console_scripts': ['optuna = optuna.cli:main']})
166
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,11 +33,11 @@
'alembic',
'cliff',
'colorlog',
+ 'joblib',
'numpy',
'scipy!=1.4.0',
'sqlalchemy>=1.1.0',
'tqdm',
- 'joblib',
]
@@ -59,10 +59,10 @@
'pytest-cov',
],
'doctest': [
- 'pandas',
'cma',
- 'scikit-learn>=0.19.0',
+ 'pandas',
'plotly>=4.0.0',
+ 'scikit-learn>=0.19.0',
'scikit-optimize',
],
'document': [
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,11 +33,11 @@\n 'alembic',\n 'cliff',\n 'colorlog',\n+ 'joblib',\n 'numpy',\n 'scipy!=1.4.0',\n 'sqlalchemy>=1.1.0',\n 'tqdm',\n- 'joblib',\n ]\n \n \n@@ -59,10 +59,10 @@\n 'pytest-cov',\n ],\n 'doctest': [\n- 'pandas',\n 'cma',\n- 'scikit-learn>=0.19.0',\n+ 'pandas',\n 'plotly>=4.0.0',\n+ 'scikit-learn>=0.19.0',\n 'scikit-optimize',\n ],\n 'document': [\n", "issue": "Order dependencies in `setup.py` alphabetically.\nThe libraries in `setup.py` are not arranged in alphabetical order. I think it would be helpful for developers if we fix it.\r\n\r\nExamples:\r\n\r\nhttps://github.com/optuna/optuna/blob/master/setup.py#L30-L41\r\n```python\r\ndef get_install_requires() -> List[str]:\r\n\r\n return [\r\n 'alembic',\r\n 'cliff',\r\n 'colorlog',\r\n 'numpy',\r\n 'scipy!=1.4.0',\r\n 'sqlalchemy>=1.1.0',\r\n 'tqdm',\r\n 'joblib',\r\n ]\r\n```\r\n\r\nhttps://github.com/optuna/optuna/blob/master/setup.py#L61-L66\r\n```python\r\n 'doctest': [\r\n 'pandas',\r\n 'cma',\r\n 'scikit-learn>=0.19.0',\r\n 'plotly>=4.0.0',\r\n ],\r\n```\r\n\r\n\nOrder dependencies in `setup.py` alphabetically.\nThe libraries in `setup.py` are not arranged in alphabetical order. I think it would be helpful for developers if we fix it.\r\n\r\nExamples:\r\n\r\nhttps://github.com/optuna/optuna/blob/master/setup.py#L30-L41\r\n```python\r\ndef get_install_requires() -> List[str]:\r\n\r\n return [\r\n 'alembic',\r\n 'cliff',\r\n 'colorlog',\r\n 'numpy',\r\n 'scipy!=1.4.0',\r\n 'sqlalchemy>=1.1.0',\r\n 'tqdm',\r\n 'joblib',\r\n ]\r\n```\r\n\r\nhttps://github.com/optuna/optuna/blob/master/setup.py#L61-L66\r\n```python\r\n 'doctest': [\r\n 'pandas',\r\n 'cma',\r\n 'scikit-learn>=0.19.0',\r\n 'plotly>=4.0.0',\r\n ],\r\n```\r\n\r\n\n", "before_files": [{"content": "import os\nimport sys\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), 'optuna', 'version.py')\n with open(version_filepath) as f:\n for line in f:\n if line.startswith('__version__'):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), 'README.md')\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n return [\n 'alembic',\n 'cliff',\n 'colorlog',\n 'numpy',\n 'scipy!=1.4.0',\n 'sqlalchemy>=1.1.0',\n 'tqdm',\n 'joblib',\n ]\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()['testing']\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n 'checking': [\n 'autopep8',\n 'hacking',\n 'mypy',\n ],\n 'codecov': [\n 'codecov',\n 'pytest-cov',\n ],\n 'doctest': [\n 'pandas',\n 'cma',\n 'scikit-learn>=0.19.0',\n 'plotly>=4.0.0',\n 'scikit-optimize',\n ],\n 'document': [\n 'sphinx',\n 'sphinx_rtd_theme',\n ],\n 'example': [\n 'catboost',\n 'chainer',\n 'lightgbm',\n 'mlflow',\n 'mxnet',\n 'pytorch-ignite',\n 'scikit-image',\n 'scikit-learn',\n 'torch',\n 'torchvision>=0.5.0',\n 'xgboost',\n ] + (['fastai<2'] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + ([\n 'dask[dataframe]',\n 'dask-ml',\n 'keras',\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n 'pytorch-lightning<0.7.0',\n 
'tensorflow>=2.0.0',\n ] if sys.version_info[:2] < (3, 8) else []),\n 'testing': [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n 'bokeh<2.0.0',\n 'chainer>=5.0.0',\n 'cma',\n 'lightgbm',\n 'mock',\n 'mpi4py',\n 'mxnet',\n 'pandas',\n 'plotly>=4.0.0',\n 'pytest',\n 'pytorch-ignite',\n 'scikit-learn>=0.19.0',\n 'scikit-optimize',\n 'torch',\n 'torchvision>=0.5.0',\n 'xgboost',\n ] + (['fastai<2'] if (3, 5) < sys.version_info[:2] < (3, 8) else [])\n + ([\n 'keras',\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/997.\n 'pytorch-lightning<0.7.0',\n 'tensorflow',\n 'tensorflow-datasets',\n ] if sys.version_info[:2] < (3, 8) else []),\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\npfnopt_pkg = find_any_distribution(['pfnopt'])\nif pfnopt_pkg is not None:\n msg = 'We detected that PFNOpt is installed in your environment.\\n' \\\n 'PFNOpt has been renamed Optuna. Please uninstall the old\\n' \\\n 'PFNOpt in advance (e.g. by executing `$ pip uninstall pfnopt`).'\n print(msg)\n exit(1)\n\nsetup(\n name='optuna',\n version=get_version(),\n description='A hyperparameter optimization framework',\n long_description=get_long_description(),\n long_description_content_type='text/markdown',\n author='Takuya Akiba',\n author_email='[email protected]',\n url='https://optuna.org/',\n packages=find_packages(),\n package_data={\n 'optuna': [\n 'storages/rdb/alembic.ini',\n 'storages/rdb/alembic/*.*',\n 'storages/rdb/alembic/versions/*.*'\n ]\n },\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={'console_scripts': ['optuna = optuna.cli:main']})\n", "path": "setup.py"}]} | 2,501 | 197 |
gh_patches_debug_12864 | rasdani/github-patches | git_diff | facebookresearch__hydra-792 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] interpolation to hydra.job.override_dirname containing another interpolation is not possible
# 🐛 Bug
Now that #725 is fixed, I'm posting an issue for the behavior I described in (https://github.com/facebookresearch/hydra/issues/725#issuecomment-653442315)
In the following example an interpolation to `hydra.job.override_dirname` containing another interpolation results in a crash:
## To reproduce
test.py:
```
import hydra
from omegaconf import DictConfig
@hydra.main(config_path="conf", config_name="config")
def run(config: DictConfig):
print("c", config.c)
if __name__ == "__main__":
run()
```
conf/config.yaml:
```
a: 10
b: 20
c: override_${hydra:job.override_dirname}
```
call:
`python test.py b=\${a}` (I have to escape $)
** Stack trace/error message **
```
Traceback (most recent call last):
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/_internal/utils.py", line 197, in run_and_report
return func()
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/_internal/utils.py", line 271, in <lambda>
lambda: hydra.run(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/_internal/hydra.py", line 105, in run
return run_job(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/core/utils.py", line 122, in run_job
ret.return_value = task_function(task_cfg)
File "/home/mtadmin/projects/debug_hydra/interpolation_jobdir/test.py", line 16, in run
print("c", config.c)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/dictconfig.py", line 315, in __getattr__
self._format_and_raise(key=key, value=None, cause=e)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py", line 95, in _format_and_raise
format_and_raise(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py", line 555, in format_and_raise
raise_(ex, cause)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py", line 548, in raise_
raise ex
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/dictconfig.py", line 313, in __getattr__
return self._get_impl(key=key, default_value=DEFAULT_VALUE_MARKER)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/dictconfig.py", line 377, in _get_impl
return self._resolve_with_default(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/basecontainer.py", line 56, in _resolve_with_default
resolved = self._resolve_interpolation(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py", line 370, in _resolve_interpolation
return self._resolve_simple_interpolation(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py", line 344, in _resolve_simple_interpolation
self._format_and_raise(key=inter_key, value=None, cause=e)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py", line 95, in _format_and_raise
format_and_raise(
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py", line 555, in format_and_raise
raise_(ex, cause)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py", line 548, in raise_
raise ex
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py", line 335, in _resolve_simple_interpolation
value = resolver(root_node, inter_key)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/omegaconf.py", line 335, in caching
cache[key] if key in cache else resolver(*OmegaConf._tokenize_args(key))
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/core/utils.py", line 150, in <lambda>
lambda path: OmegaConf.select(cast(DictConfig, HydraConfig.get()), path),
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/omegaconf.py", line 531, in select
format_and_raise(node=cfg, key=key, value=None, cause=e, msg=str(e))
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py", line 623, in format_and_raise
raise_(ex, cause)
File "/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py", line 548, in raise_
raise ex
omegaconf.errors.ConfigKeyError: str interpolation key 'a' not found
full_key: hydra.job.override_dirname
reference_type=Optional[HydraConf]
object_type=HydraConf
```
## Expected Behavior
the `hydra.job.override_dirname` should be able to interpolate `a` via `b`
## System information
- **Hydra Version** : master/1.0.0rc2
- **Python version** : 3.8.3
- **Virtual environment type and version** : miniconda
- **Operating system** : ubuntu 18.04
## Additional context
This can be worked around with something like this:
```
hdr = HydraConfig.get()
override_dirname= hdr.job.override_dirname
```
</issue>
<code>
[start of hydra/core/hydra_config.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 from typing import Any, Optional
3
4 from omegaconf import DictConfig, OmegaConf
5
6 from hydra.conf import HydraConf
7 from hydra.core.singleton import Singleton
8
9
10 class HydraConfig(metaclass=Singleton):
11 def __init__(self) -> None:
12 self.cfg: Optional[HydraConf] = None
13
14 def set_config(self, cfg: DictConfig) -> None:
15 assert cfg is not None
16 OmegaConf.set_readonly(cfg.hydra, True)
17 assert OmegaConf.get_type(cfg, "hydra") == HydraConf
18 self.cfg = OmegaConf.masked_copy(cfg, "hydra") # type: ignore
19
20 @staticmethod
21 def get() -> HydraConf:
22 instance = HydraConfig.instance()
23 if instance.cfg is None:
24 raise ValueError("HydraConfig was not set")
25 return instance.cfg.hydra # type: ignore
26
27 @staticmethod
28 def initialized() -> bool:
29 instance = HydraConfig.instance()
30 return instance.cfg is not None
31
32 @staticmethod
33 def instance(*args: Any, **kwargs: Any) -> "HydraConfig":
34 return Singleton.instance(HydraConfig, *args, **kwargs) # type: ignore
35
[end of hydra/core/hydra_config.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hydra/core/hydra_config.py b/hydra/core/hydra_config.py
--- a/hydra/core/hydra_config.py
+++ b/hydra/core/hydra_config.py
@@ -15,7 +15,13 @@
assert cfg is not None
OmegaConf.set_readonly(cfg.hydra, True)
assert OmegaConf.get_type(cfg, "hydra") == HydraConf
+ # THis is emulating a node that is hidden.
+ # It's quiet a hack but it will be much better once
+ # https://github.com/omry/omegaconf/issues/280 is done
+ # The motivation is that this allows for interpolations from the hydra node
+ # into the user's config.
self.cfg = OmegaConf.masked_copy(cfg, "hydra") # type: ignore
+ self.cfg.hydra._set_parent(cfg) # type: ignore
@staticmethod
def get() -> HydraConf:
| {"golden_diff": "diff --git a/hydra/core/hydra_config.py b/hydra/core/hydra_config.py\n--- a/hydra/core/hydra_config.py\n+++ b/hydra/core/hydra_config.py\n@@ -15,7 +15,13 @@\n assert cfg is not None\n OmegaConf.set_readonly(cfg.hydra, True)\n assert OmegaConf.get_type(cfg, \"hydra\") == HydraConf\n+ # THis is emulating a node that is hidden.\n+ # It's quiet a hack but it will be much better once\n+ # https://github.com/omry/omegaconf/issues/280 is done\n+ # The motivation is that this allows for interpolations from the hydra node\n+ # into the user's config.\n self.cfg = OmegaConf.masked_copy(cfg, \"hydra\") # type: ignore\n+ self.cfg.hydra._set_parent(cfg) # type: ignore\n \n @staticmethod\n def get() -> HydraConf:\n", "issue": "[Bug] interpolation to hydra.job.override_dirname containing another interpolation is not possible\n# \ud83d\udc1b Bug\r\n\r\nNow that #725 is fixed, I post an issue for the behavior I described in (https://github.com/facebookresearch/hydra/issues/725#issuecomment-653442315)\r\nIn the following example an interpolation to `hydra.job.override_dirname` containing another interpolation results in a crash:\r\n\r\n## To reproduce\r\n\r\ntest.py:\r\n```\r\nimport hydra\r\nfrom omegaconf import DictConfig\r\n\r\n\r\[email protected](config_path=\"conf\", config_name=\"config\")\r\ndef run(config: DictConfig):\r\n print(\"c\", config.c)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n run()\r\n\r\n```\r\n\r\nconf/config.yaml:\r\n```\r\na: 10\r\nb: 20\r\n\r\nc: override_${hydra:job.override_dirname}\r\n```\r\n\r\ncall:\r\n`python test.py b=\\${a}` (I have to escape $)\r\n\r\n\r\n** Stack trace/error message **\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/_internal/utils.py\", line 197, in run_and_report\r\n return func()\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/_internal/utils.py\", line 271, in <lambda>\r\n lambda: hydra.run(\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/_internal/hydra.py\", line 105, in run\r\n return run_job(\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/core/utils.py\", line 122, in run_job\r\n ret.return_value = task_function(task_cfg)\r\n File \"/home/mtadmin/projects/debug_hydra/interpolation_jobdir/test.py\", line 16, in run\r\n print(\"c\", config.c)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/dictconfig.py\", line 315, in __getattr__\r\n self._format_and_raise(key=key, value=None, cause=e)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py\", line 95, in _format_and_raise\r\n format_and_raise(\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py\", line 555, in format_and_raise\r\n raise_(ex, cause)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py\", line 548, in raise_\r\n raise ex\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/dictconfig.py\", line 313, in __getattr__\r\n return self._get_impl(key=key, default_value=DEFAULT_VALUE_MARKER)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/dictconfig.py\", line 377, in _get_impl\r\n return self._resolve_with_default(\r\n File 
\"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/basecontainer.py\", line 56, in _resolve_with_default\r\n resolved = self._resolve_interpolation(\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py\", line 370, in _resolve_interpolation\r\n return self._resolve_simple_interpolation(\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py\", line 344, in _resolve_simple_interpolation\r\n self._format_and_raise(key=inter_key, value=None, cause=e)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py\", line 95, in _format_and_raise\r\n format_and_raise(\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py\", line 555, in format_and_raise\r\n raise_(ex, cause)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py\", line 548, in raise_\r\n raise ex\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/base.py\", line 335, in _resolve_simple_interpolation\r\n value = resolver(root_node, inter_key)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/omegaconf.py\", line 335, in caching\r\n cache[key] if key in cache else resolver(*OmegaConf._tokenize_args(key))\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/hydra/core/utils.py\", line 150, in <lambda>\r\n lambda path: OmegaConf.select(cast(DictConfig, HydraConfig.get()), path),\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/omegaconf.py\", line 531, in select\r\n format_and_raise(node=cfg, key=key, value=None, cause=e, msg=str(e))\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py\", line 623, in format_and_raise\r\n raise_(ex, cause)\r\n File \"/home/mtadmin/miniconda3/envs/debug_hydra/lib/python3.8/site-packages/omegaconf/_utils.py\", line 548, in raise_\r\n raise ex\r\nomegaconf.errors.ConfigKeyError: str interpolation key 'a' not found\r\n full_key: hydra.job.override_dirname\r\n reference_type=Optional[HydraConf]\r\n object_type=HydraConf\r\n\r\n```\r\n\r\n## Expected Behavior\r\nthe `hydra.job.override_dirname` should be able to interpolate `a` via `b`\r\n\r\n## System information\r\n- **Hydra Version** : master/1.0.0rc2\r\n- **Python version** : 3.8.3\r\n- **Virtual environment type and version** : miniconda\r\n- **Operating system** : ubuntu 18.04\r\n\r\n## Additional context\r\nThis can be worked arround with something like this:\r\n```\r\nhdr = HydraConfig.get()\r\noverride_dirname= hdr.job.override_dirname\r\n```\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nfrom typing import Any, Optional\n\nfrom omegaconf import DictConfig, OmegaConf\n\nfrom hydra.conf import HydraConf\nfrom hydra.core.singleton import Singleton\n\n\nclass HydraConfig(metaclass=Singleton):\n def __init__(self) -> None:\n self.cfg: Optional[HydraConf] = None\n\n def set_config(self, cfg: DictConfig) -> None:\n assert cfg is not None\n OmegaConf.set_readonly(cfg.hydra, True)\n assert OmegaConf.get_type(cfg, \"hydra\") == HydraConf\n self.cfg = OmegaConf.masked_copy(cfg, \"hydra\") # type: ignore\n\n @staticmethod\n def get() -> HydraConf:\n instance = HydraConfig.instance()\n if instance.cfg is None:\n raise ValueError(\"HydraConfig was not set\")\n return instance.cfg.hydra # type: ignore\n\n @staticmethod\n def initialized() -> bool:\n instance = HydraConfig.instance()\n return instance.cfg is not None\n\n @staticmethod\n def instance(*args: Any, **kwargs: Any) -> \"HydraConfig\":\n return Singleton.instance(HydraConfig, *args, **kwargs) # type: ignore\n", "path": "hydra/core/hydra_config.py"}]} | 2,445 | 224 |
gh_patches_debug_128 | rasdani/github-patches | git_diff | opsdroid__opsdroid-28 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Regex case sensitive
The regex match is currently case insensitive. It shouldn't be.
https://github.com/opsdroid/opsdroid/blob/master/opsdroid/helper.py#L30
</issue>
<code>
[start of opsdroid/helper.py]
1 """Helper functions to use within OpsDroid."""
2
3 import logging
4 import re
5
6
7 def set_logging_level(logging_level):
8 """Set the logger level based on the user configuration."""
9 logger = logging.getLogger()
10 if logging_level == 'critical':
11 logger.setLevel(logging.CRITICAL)
12 elif logging_level == 'error':
13 logger.setLevel(logging.ERROR)
14 elif logging_level == 'warning':
15 logger.setLevel(logging.WARNING)
16 elif logging_level == 'info':
17 logger.setLevel(logging.INFO)
18 elif logging_level == 'debug':
19 logger.setLevel(logging.DEBUG)
20 # No need to log the others as they'll never be seen
21 logging.debug("Set log level to debug")
22 else:
23 logger.setLevel(logging.INFO)
24 logging.warning("Log level '" + logging_level +
25 "' unknown, defaulting to 'info'")
26
27
28 def match(regex, message):
29 """Regex match a string."""
30 return re.match(regex, message, re.M | re.I)
31
[end of opsdroid/helper.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opsdroid/helper.py b/opsdroid/helper.py
--- a/opsdroid/helper.py
+++ b/opsdroid/helper.py
@@ -27,4 +27,4 @@
def match(regex, message):
"""Regex match a string."""
- return re.match(regex, message, re.M | re.I)
+ return re.match(regex, message)
| {"golden_diff": "diff --git a/opsdroid/helper.py b/opsdroid/helper.py\n--- a/opsdroid/helper.py\n+++ b/opsdroid/helper.py\n@@ -27,4 +27,4 @@\n \n def match(regex, message):\n \"\"\"Regex match a string.\"\"\"\n- return re.match(regex, message, re.M | re.I)\n+ return re.match(regex, message)\n", "issue": "Regex case sensitive\nThe regex match is currently case insensitive. It shouldn't be.\n\nhttps://github.com/opsdroid/opsdroid/blob/master/opsdroid/helper.py#L30\n\n", "before_files": [{"content": "\"\"\"Helper functions to use within OpsDroid.\"\"\"\n\nimport logging\nimport re\n\n\ndef set_logging_level(logging_level):\n \"\"\"Set the logger level based on the user configuration.\"\"\"\n logger = logging.getLogger()\n if logging_level == 'critical':\n logger.setLevel(logging.CRITICAL)\n elif logging_level == 'error':\n logger.setLevel(logging.ERROR)\n elif logging_level == 'warning':\n logger.setLevel(logging.WARNING)\n elif logging_level == 'info':\n logger.setLevel(logging.INFO)\n elif logging_level == 'debug':\n logger.setLevel(logging.DEBUG)\n # No need to log the others as they'll never be seen\n logging.debug(\"Set log level to debug\")\n else:\n logger.setLevel(logging.INFO)\n logging.warning(\"Log level '\" + logging_level +\n \"' unknown, defaulting to 'info'\")\n\n\ndef match(regex, message):\n \"\"\"Regex match a string.\"\"\"\n return re.match(regex, message, re.M | re.I)\n", "path": "opsdroid/helper.py"}]} | 829 | 86 |
gh_patches_debug_10099 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-905 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CI-fail] HDF5 big data
This test fails because h5py is missing. We can probably change the requirements to include h5py, or skip this test if h5py is not found.
```
examples/test_examples.py::test_examples[PyQt4-f17] frontend = PyQt4. f = ('HDF5 big data', 'hdf5.py')
HDF5 big data.....................................FAILED
Python 2.7.16 |Anaconda, Inc.| (default, Mar 14 2019, 21:00:58)
[GCC 7.3.0] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> >>> ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... Traceback (most recent call last):
File "<stdin>", line 6, in <module>
File "hdf5.py", line 19, in <module>
import h5py
ImportError: No module named h5py
>>>
```
</issue>
<code>
[start of examples/utils.py]
1 from __future__ import division, print_function, absolute_import
2 import subprocess
3 import time
4 import os
5 import sys
6 import errno
7 from pyqtgraph.pgcollections import OrderedDict
8 from pyqtgraph.python2_3 import basestring
9
10 path = os.path.abspath(os.path.dirname(__file__))
11
12
13 examples = OrderedDict([
14 ('Command-line usage', 'CLIexample.py'),
15 ('Basic Plotting', 'Plotting.py'),
16 ('ImageView', 'ImageView.py'),
17 ('ParameterTree', 'parametertree.py'),
18 ('Crosshair / Mouse interaction', 'crosshair.py'),
19 ('Data Slicing', 'DataSlicing.py'),
20 ('Plot Customization', 'customPlot.py'),
21 ('Image Analysis', 'imageAnalysis.py'),
22 ('ViewBox Features', 'ViewBoxFeatures.py'),
23 ('Dock widgets', 'dockarea.py'),
24 ('Console', 'ConsoleWidget.py'),
25 ('Histograms', 'histogram.py'),
26 ('Beeswarm plot', 'beeswarm.py'),
27 ('Symbols', 'Symbols.py'),
28 ('Auto-range', 'PlotAutoRange.py'),
29 ('Remote Plotting', 'RemoteSpeedTest.py'),
30 ('Scrolling plots', 'scrollingPlots.py'),
31 ('HDF5 big data', 'hdf5.py'),
32 ('Demos', OrderedDict([
33 ('Optics', 'optics_demos.py'),
34 ('Special relativity', 'relativity_demo.py'),
35 ('Verlet chain', 'verlet_chain_demo.py'),
36 ('Koch Fractal', 'fractal.py'),
37 ])),
38 ('GraphicsItems', OrderedDict([
39 ('Scatter Plot', 'ScatterPlot.py'),
40 #('PlotItem', 'PlotItem.py'),
41 ('IsocurveItem', 'isocurve.py'),
42 ('GraphItem', 'GraphItem.py'),
43 ('ErrorBarItem', 'ErrorBarItem.py'),
44 ('FillBetweenItem', 'FillBetweenItem.py'),
45 ('ImageItem - video', 'ImageItem.py'),
46 ('ImageItem - draw', 'Draw.py'),
47 ('Region-of-Interest', 'ROIExamples.py'),
48 ('Bar Graph', 'BarGraphItem.py'),
49 ('GraphicsLayout', 'GraphicsLayout.py'),
50 ('LegendItem', 'Legend.py'),
51 ('Text Item', 'text.py'),
52 ('Linked Views', 'linkedViews.py'),
53 ('Arrow', 'Arrow.py'),
54 ('ViewBox', 'ViewBoxFeatures.py'),
55 ('Custom Graphics', 'customGraphicsItem.py'),
56 ('Labeled Graph', 'CustomGraphItem.py'),
57 ])),
58 ('Benchmarks', OrderedDict([
59 ('Video speed test', 'VideoSpeedTest.py'),
60 ('Line Plot update', 'PlotSpeedTest.py'),
61 ('Scatter Plot update', 'ScatterPlotSpeedTest.py'),
62 ('Multiple plots', 'MultiPlotSpeedTest.py'),
63 ])),
64 ('3D Graphics', OrderedDict([
65 ('Volumetric', 'GLVolumeItem.py'),
66 ('Isosurface', 'GLIsosurface.py'),
67 ('Surface Plot', 'GLSurfacePlot.py'),
68 ('Scatter Plot', 'GLScatterPlotItem.py'),
69 ('Shaders', 'GLshaders.py'),
70 ('Line Plot', 'GLLinePlotItem.py'),
71 ('Mesh', 'GLMeshItem.py'),
72 ('Image', 'GLImageItem.py'),
73 ])),
74 ('Widgets', OrderedDict([
75 ('PlotWidget', 'PlotWidget.py'),
76 ('SpinBox', 'SpinBox.py'),
77 ('ConsoleWidget', 'ConsoleWidget.py'),
78 ('Histogram / lookup table', 'HistogramLUT.py'),
79 ('TreeWidget', 'TreeWidget.py'),
80 ('ScatterPlotWidget', 'ScatterPlotWidget.py'),
81 ('DataTreeWidget', 'DataTreeWidget.py'),
82 ('GradientWidget', 'GradientWidget.py'),
83 ('TableWidget', 'TableWidget.py'),
84 ('ColorButton', 'ColorButton.py'),
85 #('CheckTable', '../widgets/CheckTable.py'),
86 #('VerticalLabel', '../widgets/VerticalLabel.py'),
87 ('JoystickButton', 'JoystickButton.py'),
88 ])),
89
90 ('Flowcharts', 'Flowchart.py'),
91 ('Custom Flowchart Nodes', 'FlowchartCustomNode.py'),
92 ])
93
94
95 def buildFileList(examples, files=None):
96 if files == None:
97 files = []
98 for key, val in examples.items():
99 #item = QtGui.QTreeWidgetItem([key])
100 if isinstance(val, basestring):
101 #item.file = val
102 files.append((key,val))
103 else:
104 buildFileList(val, files)
105 return files
106
107 def testFile(name, f, exe, lib, graphicsSystem=None):
108 global path
109 fn = os.path.join(path,f)
110 #print "starting process: ", fn
111 os.chdir(path)
112 sys.stdout.write(name)
113 sys.stdout.flush()
114
115 import1 = "import %s" % lib if lib != '' else ''
116 import2 = os.path.splitext(os.path.split(fn)[1])[0]
117 graphicsSystem = '' if graphicsSystem is None else "pg.QtGui.QApplication.setGraphicsSystem('%s')" % graphicsSystem
118 code = """
119 try:
120 %s
121 import initExample
122 import pyqtgraph as pg
123 %s
124 import %s
125 import sys
126 print("test complete")
127 sys.stdout.flush()
128 import time
129 while True: ## run a little event loop
130 pg.QtGui.QApplication.processEvents()
131 time.sleep(0.01)
132 except:
133 print("test failed")
134 raise
135
136 """ % (import1, graphicsSystem, import2)
137
138 if sys.platform.startswith('win'):
139 process = subprocess.Popen([exe], stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
140 process.stdin.write(code.encode('UTF-8'))
141 process.stdin.close()
142 else:
143 process = subprocess.Popen(['exec %s -i' % (exe)], shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
144 process.stdin.write(code.encode('UTF-8'))
145 process.stdin.close() ##?
146 output = ''
147 fail = False
148 while True:
149 try:
150 c = process.stdout.read(1).decode()
151 except IOError as err:
152 if err.errno == errno.EINTR:
153 # Interrupted system call; just try again.
154 c = ''
155 else:
156 raise
157 output += c
158 #sys.stdout.write(c)
159 #sys.stdout.flush()
160 if output.endswith('test complete'):
161 break
162 if output.endswith('test failed'):
163 fail = True
164 break
165 time.sleep(1)
166 process.kill()
167 #res = process.communicate()
168 res = (process.stdout.read(), process.stderr.read())
169
170 if fail or 'exception' in res[1].decode().lower() or 'error' in res[1].decode().lower():
171 print('.' * (50-len(name)) + 'FAILED')
172 print(res[0].decode())
173 print(res[1].decode())
174 else:
175 print('.' * (50-len(name)) + 'passed')
176
[end of examples/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/utils.py b/examples/utils.py
--- a/examples/utils.py
+++ b/examples/utils.py
@@ -4,6 +4,7 @@
import os
import sys
import errno
+import copy
from pyqtgraph.pgcollections import OrderedDict
from pyqtgraph.python2_3 import basestring
@@ -91,6 +92,11 @@
('Custom Flowchart Nodes', 'FlowchartCustomNode.py'),
])
+not_tested = ['HDF5 big data']
+
+tested_examples = copy.deepcopy(examples)
+all(map(tested_examples.pop, not_tested))
+
def buildFileList(examples, files=None):
if files == None:
| {"golden_diff": "diff --git a/examples/utils.py b/examples/utils.py\n--- a/examples/utils.py\n+++ b/examples/utils.py\n@@ -4,6 +4,7 @@\n import os\n import sys\n import errno\n+import copy\n from pyqtgraph.pgcollections import OrderedDict\n from pyqtgraph.python2_3 import basestring\n \n@@ -91,6 +92,11 @@\n ('Custom Flowchart Nodes', 'FlowchartCustomNode.py'),\n ])\n \n+not_tested = ['HDF5 big data']\n+\n+tested_examples = copy.deepcopy(examples)\n+all(map(tested_examples.pop, not_tested))\n+\n \n def buildFileList(examples, files=None):\n if files == None:\n", "issue": "[CI-fail] HDF5 big data\nThis test fails due to h5py missing. Probably we can change requisites to include h5py or skip this test if h5py is not found.\r\n```\r\nexamples/test_examples.py::test_examples[PyQt4-f17] frontend = PyQt4. f = ('HDF5 big data', 'hdf5.py')\r\nHDF5 big data.....................................FAILED\r\nPython 2.7.16 |Anaconda, Inc.| (default, Mar 14 2019, 21:00:58) \r\n[GCC 7.3.0] on linux2\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> >>> ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... Traceback (most recent call last):\r\n File \"<stdin>\", line 6, in <module>\r\n File \"hdf5.py\", line 19, in <module>\r\n import h5py\r\nImportError: No module named h5py\r\n>>> \r\n```\n", "before_files": [{"content": "from __future__ import division, print_function, absolute_import\nimport subprocess\nimport time\nimport os\nimport sys\nimport errno\nfrom pyqtgraph.pgcollections import OrderedDict\nfrom pyqtgraph.python2_3 import basestring\n\npath = os.path.abspath(os.path.dirname(__file__))\n\n\nexamples = OrderedDict([\n ('Command-line usage', 'CLIexample.py'),\n ('Basic Plotting', 'Plotting.py'),\n ('ImageView', 'ImageView.py'),\n ('ParameterTree', 'parametertree.py'),\n ('Crosshair / Mouse interaction', 'crosshair.py'),\n ('Data Slicing', 'DataSlicing.py'),\n ('Plot Customization', 'customPlot.py'),\n ('Image Analysis', 'imageAnalysis.py'),\n ('ViewBox Features', 'ViewBoxFeatures.py'),\n ('Dock widgets', 'dockarea.py'),\n ('Console', 'ConsoleWidget.py'),\n ('Histograms', 'histogram.py'),\n ('Beeswarm plot', 'beeswarm.py'),\n ('Symbols', 'Symbols.py'),\n ('Auto-range', 'PlotAutoRange.py'),\n ('Remote Plotting', 'RemoteSpeedTest.py'),\n ('Scrolling plots', 'scrollingPlots.py'),\n ('HDF5 big data', 'hdf5.py'),\n ('Demos', OrderedDict([\n ('Optics', 'optics_demos.py'),\n ('Special relativity', 'relativity_demo.py'),\n ('Verlet chain', 'verlet_chain_demo.py'),\n ('Koch Fractal', 'fractal.py'),\n ])),\n ('GraphicsItems', OrderedDict([\n ('Scatter Plot', 'ScatterPlot.py'),\n #('PlotItem', 'PlotItem.py'),\n ('IsocurveItem', 'isocurve.py'),\n ('GraphItem', 'GraphItem.py'),\n ('ErrorBarItem', 'ErrorBarItem.py'),\n ('FillBetweenItem', 'FillBetweenItem.py'),\n ('ImageItem - video', 'ImageItem.py'),\n ('ImageItem - draw', 'Draw.py'),\n ('Region-of-Interest', 'ROIExamples.py'),\n ('Bar Graph', 'BarGraphItem.py'),\n ('GraphicsLayout', 'GraphicsLayout.py'),\n ('LegendItem', 'Legend.py'),\n ('Text Item', 'text.py'),\n ('Linked Views', 'linkedViews.py'),\n ('Arrow', 'Arrow.py'),\n ('ViewBox', 'ViewBoxFeatures.py'),\n ('Custom Graphics', 'customGraphicsItem.py'),\n ('Labeled Graph', 'CustomGraphItem.py'),\n ])),\n ('Benchmarks', OrderedDict([\n ('Video speed test', 'VideoSpeedTest.py'),\n ('Line Plot update', 'PlotSpeedTest.py'),\n ('Scatter Plot update', 'ScatterPlotSpeedTest.py'),\n ('Multiple plots', 'MultiPlotSpeedTest.py'),\n ])),\n ('3D Graphics', 
OrderedDict([\n ('Volumetric', 'GLVolumeItem.py'),\n ('Isosurface', 'GLIsosurface.py'),\n ('Surface Plot', 'GLSurfacePlot.py'),\n ('Scatter Plot', 'GLScatterPlotItem.py'),\n ('Shaders', 'GLshaders.py'),\n ('Line Plot', 'GLLinePlotItem.py'),\n ('Mesh', 'GLMeshItem.py'),\n ('Image', 'GLImageItem.py'),\n ])),\n ('Widgets', OrderedDict([\n ('PlotWidget', 'PlotWidget.py'),\n ('SpinBox', 'SpinBox.py'),\n ('ConsoleWidget', 'ConsoleWidget.py'),\n ('Histogram / lookup table', 'HistogramLUT.py'),\n ('TreeWidget', 'TreeWidget.py'),\n ('ScatterPlotWidget', 'ScatterPlotWidget.py'),\n ('DataTreeWidget', 'DataTreeWidget.py'),\n ('GradientWidget', 'GradientWidget.py'),\n ('TableWidget', 'TableWidget.py'),\n ('ColorButton', 'ColorButton.py'),\n #('CheckTable', '../widgets/CheckTable.py'),\n #('VerticalLabel', '../widgets/VerticalLabel.py'),\n ('JoystickButton', 'JoystickButton.py'),\n ])),\n\n ('Flowcharts', 'Flowchart.py'),\n ('Custom Flowchart Nodes', 'FlowchartCustomNode.py'),\n])\n\n\ndef buildFileList(examples, files=None):\n if files == None:\n files = []\n for key, val in examples.items():\n #item = QtGui.QTreeWidgetItem([key])\n if isinstance(val, basestring):\n #item.file = val\n files.append((key,val))\n else:\n buildFileList(val, files)\n return files\n\ndef testFile(name, f, exe, lib, graphicsSystem=None):\n global path\n fn = os.path.join(path,f)\n #print \"starting process: \", fn\n os.chdir(path)\n sys.stdout.write(name)\n sys.stdout.flush()\n\n import1 = \"import %s\" % lib if lib != '' else ''\n import2 = os.path.splitext(os.path.split(fn)[1])[0]\n graphicsSystem = '' if graphicsSystem is None else \"pg.QtGui.QApplication.setGraphicsSystem('%s')\" % graphicsSystem\n code = \"\"\"\ntry:\n %s\n import initExample\n import pyqtgraph as pg\n %s\n import %s\n import sys\n print(\"test complete\")\n sys.stdout.flush()\n import time\n while True: ## run a little event loop\n pg.QtGui.QApplication.processEvents()\n time.sleep(0.01)\nexcept:\n print(\"test failed\")\n raise\n\n\"\"\" % (import1, graphicsSystem, import2)\n\n if sys.platform.startswith('win'):\n process = subprocess.Popen([exe], stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n process.stdin.write(code.encode('UTF-8'))\n process.stdin.close()\n else:\n process = subprocess.Popen(['exec %s -i' % (exe)], shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n process.stdin.write(code.encode('UTF-8'))\n process.stdin.close() ##?\n output = ''\n fail = False\n while True:\n try:\n c = process.stdout.read(1).decode()\n except IOError as err:\n if err.errno == errno.EINTR:\n # Interrupted system call; just try again.\n c = ''\n else:\n raise\n output += c\n #sys.stdout.write(c)\n #sys.stdout.flush()\n if output.endswith('test complete'):\n break\n if output.endswith('test failed'):\n fail = True\n break\n time.sleep(1)\n process.kill()\n #res = process.communicate()\n res = (process.stdout.read(), process.stderr.read())\n\n if fail or 'exception' in res[1].decode().lower() or 'error' in res[1].decode().lower():\n print('.' * (50-len(name)) + 'FAILED')\n print(res[0].decode())\n print(res[1].decode())\n else:\n print('.' * (50-len(name)) + 'passed')\n", "path": "examples/utils.py"}]} | 2,674 | 150 |
gh_patches_debug_38561 | rasdani/github-patches | git_diff | Lightning-Universe__lightning-flash-1666 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`download_data` from `flash.core.data.utils` connects to the internet before checking if a file exists
## 🐛 Bug
On many supercomputers, the usual workflow for ML code is to first run the download part on the login nodes (which have access to the internet) and stop the code right before the actual training starts.
Then, when you run on the compute nodes (the ones with the actual GPUs and no internet access), you let the code run to the end. In other frameworks, data downloaders detect that the files are already present and skip the download before ever trying to connect to the internet.
Flash first tries to check the file size in [this line](https://github.com/Lightning-Universe/lightning-flash/blob/18ff71e228ea0d68d6564ae454a7053e503dee15/src/flash/core/data/utils.py#L86), which will freeze on a machine without internet access.
### To Reproduce
call "download_data" on a machine with no internet access
#### Code sample
```
from flash.core.data.utils import download_data
download_data("https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip", "data/")
```
### Expected behavior
If the file is already there, the download should be skipped.
### Environment
- OS (e.g., Linux): Centos 8.6
- Python version: 3.10
- PyTorch/Lightning/Flash Version (e.g., 1.10/1.5/0.7): Pytorch 1.12.1, Lightning 0.8.4, Flash 0.8.1.post0
- GPU models and configuration: 16x A100 40GB
- Any other relevant information:
### Additional context
Fast.ai's fastdownload, for example, does not suffer from this - if the file is there, it doesn't try to download it, even if it's of the wrong size: [fastdownload link](https://github.com/fastai/fastcore/blob/1f6844d44d6e0e26b393cecd37818dbb4d391aca/fastcore/net.py#L180)
</issue>
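For illustration, a minimal sketch of the guard the report asks for: derive the local file name, check for it, and return before touching the network. This is only a sketch against the helper shown below, not the library's actual implementation.

```python
import os

def download_data(url: str, path: str = "data/") -> None:
    # Same naming scheme as the helper below: last URL segment inside `path`.
    local_filename = os.path.join(path, url.split("/")[-1])
    if os.path.exists(local_filename):
        # File already present: skip the download without opening any connection.
        return
    os.makedirs(path, exist_ok=True)
    # ... only now issue requests.get(url, stream=True) and stream the chunks to disk ...
```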
<code>
[start of src/flash/core/data/utils.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os.path
16 import tarfile
17 import zipfile
18 from typing import Any, Callable, Dict, Iterable, Mapping, Optional, Set
19
20 import requests
21 import urllib3
22 from pytorch_lightning.utilities.apply_func import apply_to_collection
23 from torch import nn
24 from tqdm.auto import tqdm as tq
25
26 from flash.core.utilities.imports import _TOPIC_CORE_AVAILABLE
27 from flash.core.utilities.stages import RunningStage
28
29 # Skip doctests if requirements aren't available
30 if not _TOPIC_CORE_AVAILABLE:
31 __doctest_skip__ = ["download_data"]
32
33 _STAGES_PREFIX = {
34 RunningStage.TRAINING: "train",
35 RunningStage.TESTING: "test",
36 RunningStage.VALIDATING: "val",
37 RunningStage.PREDICTING: "predict",
38 RunningStage.SERVING: "serve",
39 RunningStage.SANITY_CHECKING: "val",
40 }
41
42 _INPUT_TRANSFORM_FUNCS: Set[str] = {
43 "per_sample_transform",
44 "per_batch_transform",
45 "per_sample_transform_on_device",
46 "per_batch_transform_on_device",
47 "collate",
48 }
49
50 _CALLBACK_FUNCS: Set[str] = {
51 "load_sample",
52 *_INPUT_TRANSFORM_FUNCS,
53 }
54
55 _OUTPUT_TRANSFORM_FUNCS: Set[str] = {
56 "per_batch_transform",
57 "uncollate",
58 "per_sample_transform",
59 }
60
61
62 def download_data(url: str, path: str = "data/", verbose: bool = False) -> None:
63 """Download file with progressbar.
64
65 # Code adapted from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603
66 # __author__ = "github.com/ruxi"
67 # __license__ = "MIT"
68
69 Examples
70 ________
71
72 .. doctest::
73
74 >>> import os
75 >>> from flash.core.data.utils import download_data
76 >>> download_data("https://pl-flash-data.s3.amazonaws.com/titanic.zip", "./data")
77 >>> os.listdir("./data") # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
78 [...]
79
80 """
81 # Disable warning about making an insecure request
82 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
83
84 if not os.path.exists(path):
85 os.makedirs(path)
86 local_filename = os.path.join(path, url.split("/")[-1])
87 r = requests.get(url, stream=True, verify=False)
88 file_size = int(r.headers["Content-Length"]) if "Content-Length" in r.headers else 0
89 chunk_size = 1024
90 num_bars = int(file_size / chunk_size)
91 if verbose:
92 print({"file_size": file_size})
93 print({"num_bars": num_bars})
94
95 if not os.path.exists(local_filename):
96 with open(local_filename, "wb") as fp:
97 for chunk in tq(
98 r.iter_content(chunk_size=chunk_size),
99 total=num_bars,
100 unit="KB",
101 desc=local_filename,
102 leave=True, # progressbar stays
103 ):
104 fp.write(chunk) # type: ignore
105
106 def extract_tarfile(file_path: str, extract_path: str, mode: str):
107 if os.path.exists(file_path):
108 with tarfile.open(file_path, mode=mode) as tar_ref:
109 for member in tar_ref.getmembers():
110 try:
111 tar_ref.extract(member, path=extract_path, set_attrs=False)
112 except PermissionError:
113 raise PermissionError(f"Could not extract tar file {file_path}")
114
115 if ".zip" in local_filename:
116 if os.path.exists(local_filename):
117 with zipfile.ZipFile(local_filename, "r") as zip_ref:
118 zip_ref.extractall(path)
119 elif local_filename.endswith(".tar.gz") or local_filename.endswith(".tgz"):
120 extract_tarfile(local_filename, path, "r:gz")
121 elif local_filename.endswith(".tar.bz2") or local_filename.endswith(".tbz"):
122 extract_tarfile(local_filename, path, "r:bz2")
123
124
125 class FuncModule(nn.Module):
126 """This class is used to wrap a callable within a nn.Module and apply the wrapped function in `__call__`"""
127
128 def __init__(self, func: Callable) -> None:
129 super().__init__()
130 self.func = func
131
132 def forward(self, *args, **kwargs) -> Any:
133 return self.func(*args, **kwargs)
134
135 def __str__(self) -> str:
136 return f"{self.__class__.__name__}({self.func.__name__})"
137
138 def __repr__(self):
139 return str(self.func)
140
141
142 def convert_to_modules(transforms: Optional[Dict[str, Callable]]):
143 if transforms is None or isinstance(transforms, nn.Module):
144 return transforms
145
146 transforms = apply_to_collection(transforms, Callable, FuncModule, wrong_dtype=nn.Module)
147 transforms = apply_to_collection(transforms, Mapping, nn.ModuleDict, wrong_dtype=nn.ModuleDict)
148 return apply_to_collection(transforms, Iterable, nn.ModuleList, wrong_dtype=(nn.ModuleList, nn.ModuleDict))
149
[end of src/flash/core/data/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/flash/core/data/utils.py b/src/flash/core/data/utils.py
--- a/src/flash/core/data/utils.py
+++ b/src/flash/core/data/utils.py
@@ -59,7 +59,7 @@
}
-def download_data(url: str, path: str = "data/", verbose: bool = False) -> None:
+def download_data(url: str, path: str = "data/", verbose: bool = False, chunk_size: int = 1024) -> None:
"""Download file with progressbar.
# Code adapted from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603
@@ -78,39 +78,42 @@
[...]
"""
+ local_filename = os.path.join(path, url.split("/")[-1])
+ if os.path.exists(local_filename):
+ if verbose:
+ print(f"local file already exists: '{local_filename}'")
+ return
+
+ os.makedirs(path, exist_ok=True)
# Disable warning about making an insecure request
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
- if not os.path.exists(path):
- os.makedirs(path)
- local_filename = os.path.join(path, url.split("/")[-1])
r = requests.get(url, stream=True, verify=False)
file_size = int(r.headers["Content-Length"]) if "Content-Length" in r.headers else 0
- chunk_size = 1024
num_bars = int(file_size / chunk_size)
if verbose:
- print({"file_size": file_size})
- print({"num_bars": num_bars})
-
- if not os.path.exists(local_filename):
- with open(local_filename, "wb") as fp:
- for chunk in tq(
- r.iter_content(chunk_size=chunk_size),
- total=num_bars,
- unit="KB",
- desc=local_filename,
- leave=True, # progressbar stays
- ):
- fp.write(chunk) # type: ignore
-
- def extract_tarfile(file_path: str, extract_path: str, mode: str):
- if os.path.exists(file_path):
- with tarfile.open(file_path, mode=mode) as tar_ref:
- for member in tar_ref.getmembers():
- try:
- tar_ref.extract(member, path=extract_path, set_attrs=False)
- except PermissionError:
- raise PermissionError(f"Could not extract tar file {file_path}")
+ print(f"file size: {file_size}")
+ print(f"num bars: {num_bars}")
+
+ with open(local_filename, "wb") as fp:
+ for chunk in tq(
+ r.iter_content(chunk_size=chunk_size),
+ total=num_bars,
+ unit="KB",
+ desc=local_filename,
+ leave=True, # progressbar stays
+ ):
+ fp.write(chunk) # type: ignore
+
+ def extract_tarfile(file_path: str, extract_path: str, mode: str) -> None:
+ if not os.path.exists(file_path):
+ return
+ with tarfile.open(file_path, mode=mode) as tar_ref:
+ for member in tar_ref.getmembers():
+ try:
+ tar_ref.extract(member, path=extract_path, set_attrs=False)
+ except PermissionError:
+ raise PermissionError(f"Could not extract tar file {file_path}")
if ".zip" in local_filename:
if os.path.exists(local_filename):
| {"golden_diff": "diff --git a/src/flash/core/data/utils.py b/src/flash/core/data/utils.py\n--- a/src/flash/core/data/utils.py\n+++ b/src/flash/core/data/utils.py\n@@ -59,7 +59,7 @@\n }\n \n \n-def download_data(url: str, path: str = \"data/\", verbose: bool = False) -> None:\n+def download_data(url: str, path: str = \"data/\", verbose: bool = False, chunk_size: int = 1024) -> None:\n \"\"\"Download file with progressbar.\n \n # Code adapted from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603\n@@ -78,39 +78,42 @@\n [...]\n \n \"\"\"\n+ local_filename = os.path.join(path, url.split(\"/\")[-1])\n+ if os.path.exists(local_filename):\n+ if verbose:\n+ print(f\"local file already exists: '{local_filename}'\")\n+ return\n+\n+ os.makedirs(path, exist_ok=True)\n # Disable warning about making an insecure request\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n \n- if not os.path.exists(path):\n- os.makedirs(path)\n- local_filename = os.path.join(path, url.split(\"/\")[-1])\n r = requests.get(url, stream=True, verify=False)\n file_size = int(r.headers[\"Content-Length\"]) if \"Content-Length\" in r.headers else 0\n- chunk_size = 1024\n num_bars = int(file_size / chunk_size)\n if verbose:\n- print({\"file_size\": file_size})\n- print({\"num_bars\": num_bars})\n-\n- if not os.path.exists(local_filename):\n- with open(local_filename, \"wb\") as fp:\n- for chunk in tq(\n- r.iter_content(chunk_size=chunk_size),\n- total=num_bars,\n- unit=\"KB\",\n- desc=local_filename,\n- leave=True, # progressbar stays\n- ):\n- fp.write(chunk) # type: ignore\n-\n- def extract_tarfile(file_path: str, extract_path: str, mode: str):\n- if os.path.exists(file_path):\n- with tarfile.open(file_path, mode=mode) as tar_ref:\n- for member in tar_ref.getmembers():\n- try:\n- tar_ref.extract(member, path=extract_path, set_attrs=False)\n- except PermissionError:\n- raise PermissionError(f\"Could not extract tar file {file_path}\")\n+ print(f\"file size: {file_size}\")\n+ print(f\"num bars: {num_bars}\")\n+\n+ with open(local_filename, \"wb\") as fp:\n+ for chunk in tq(\n+ r.iter_content(chunk_size=chunk_size),\n+ total=num_bars,\n+ unit=\"KB\",\n+ desc=local_filename,\n+ leave=True, # progressbar stays\n+ ):\n+ fp.write(chunk) # type: ignore\n+\n+ def extract_tarfile(file_path: str, extract_path: str, mode: str) -> None:\n+ if not os.path.exists(file_path):\n+ return\n+ with tarfile.open(file_path, mode=mode) as tar_ref:\n+ for member in tar_ref.getmembers():\n+ try:\n+ tar_ref.extract(member, path=extract_path, set_attrs=False)\n+ except PermissionError:\n+ raise PermissionError(f\"Could not extract tar file {file_path}\")\n \n if \".zip\" in local_filename:\n if os.path.exists(local_filename):\n", "issue": "`download_data` from `flash.core.data.utils` connects to the internet before checking if a file exists\n## \ud83d\udc1b Bug\r\n\r\nIn many supercomputers, process of running ML codes is to first run the download part on the login nodes (which have access to the internet), and stop the code right before the actual training starts. \r\n\r\nThen, when you run on the compute nodes (the ones with the actual gpus and no internet access), you let the code run to the end. 
In other frameworks, data downloaders detect the files' presence and skip it before ever trying to connect to the internet.\r\n\r\nFlash tries first to check file size in [this line](https://github.com/Lightning-Universe/lightning-flash/blob/18ff71e228ea0d68d6564ae454a7053e503dee15/src/flash/core/data/utils.py#L86), which will freeze in a machine without internet.\r\n\r\n### To Reproduce\r\n\r\ncall \"download_data\" on a machine with no internet access\r\n\r\n#### Code sample\r\n\r\n```\r\nfrom flash.core.data.utils import download_data\r\ndownload_data(\"https://pl-flash-data.s3.amazonaws.com/hymenoptera_data.zip\", \"data/\")\r\n```\r\n\r\n### Expected behavior\r\n\r\nIf the file is already there, skip download\r\n\r\n### Environment\r\n\r\n - OS (e.g., Linux): Centos 8.6\r\n - Python version: 3.10\r\n - PyTorch/Lightning/Flash Version (e.g., 1.10/1.5/0.7): Pytorch 1.12.1, Lightning 0.8.4, Flash 0.8.1.post0\r\n - GPU models and configuration: 16x A100 40GB\r\n - Any other relevant information:\r\n\r\n### Additional context\r\n\r\nFast.ai's fastdownload, for example, does not suffer from this - if the file is there, it doesn't try to download it, even if it's of the wrong size: [fastdownload link](https://github.com/fastai/fastcore/blob/1f6844d44d6e0e26b393cecd37818dbb4d391aca/fastcore/net.py#L180)\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\nimport tarfile\nimport zipfile\nfrom typing import Any, Callable, Dict, Iterable, Mapping, Optional, Set\n\nimport requests\nimport urllib3\nfrom pytorch_lightning.utilities.apply_func import apply_to_collection\nfrom torch import nn\nfrom tqdm.auto import tqdm as tq\n\nfrom flash.core.utilities.imports import _TOPIC_CORE_AVAILABLE\nfrom flash.core.utilities.stages import RunningStage\n\n# Skip doctests if requirements aren't available\nif not _TOPIC_CORE_AVAILABLE:\n __doctest_skip__ = [\"download_data\"]\n\n_STAGES_PREFIX = {\n RunningStage.TRAINING: \"train\",\n RunningStage.TESTING: \"test\",\n RunningStage.VALIDATING: \"val\",\n RunningStage.PREDICTING: \"predict\",\n RunningStage.SERVING: \"serve\",\n RunningStage.SANITY_CHECKING: \"val\",\n}\n\n_INPUT_TRANSFORM_FUNCS: Set[str] = {\n \"per_sample_transform\",\n \"per_batch_transform\",\n \"per_sample_transform_on_device\",\n \"per_batch_transform_on_device\",\n \"collate\",\n}\n\n_CALLBACK_FUNCS: Set[str] = {\n \"load_sample\",\n *_INPUT_TRANSFORM_FUNCS,\n}\n\n_OUTPUT_TRANSFORM_FUNCS: Set[str] = {\n \"per_batch_transform\",\n \"uncollate\",\n \"per_sample_transform\",\n}\n\n\ndef download_data(url: str, path: str = \"data/\", verbose: bool = False) -> None:\n \"\"\"Download file with progressbar.\n\n # Code adapted from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603\n # __author__ = \"github.com/ruxi\"\n # __license__ = \"MIT\"\n\n Examples\n ________\n\n .. 
doctest::\n\n >>> import os\n >>> from flash.core.data.utils import download_data\n >>> download_data(\"https://pl-flash-data.s3.amazonaws.com/titanic.zip\", \"./data\")\n >>> os.listdir(\"./data\") # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE\n [...]\n\n \"\"\"\n # Disable warning about making an insecure request\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n if not os.path.exists(path):\n os.makedirs(path)\n local_filename = os.path.join(path, url.split(\"/\")[-1])\n r = requests.get(url, stream=True, verify=False)\n file_size = int(r.headers[\"Content-Length\"]) if \"Content-Length\" in r.headers else 0\n chunk_size = 1024\n num_bars = int(file_size / chunk_size)\n if verbose:\n print({\"file_size\": file_size})\n print({\"num_bars\": num_bars})\n\n if not os.path.exists(local_filename):\n with open(local_filename, \"wb\") as fp:\n for chunk in tq(\n r.iter_content(chunk_size=chunk_size),\n total=num_bars,\n unit=\"KB\",\n desc=local_filename,\n leave=True, # progressbar stays\n ):\n fp.write(chunk) # type: ignore\n\n def extract_tarfile(file_path: str, extract_path: str, mode: str):\n if os.path.exists(file_path):\n with tarfile.open(file_path, mode=mode) as tar_ref:\n for member in tar_ref.getmembers():\n try:\n tar_ref.extract(member, path=extract_path, set_attrs=False)\n except PermissionError:\n raise PermissionError(f\"Could not extract tar file {file_path}\")\n\n if \".zip\" in local_filename:\n if os.path.exists(local_filename):\n with zipfile.ZipFile(local_filename, \"r\") as zip_ref:\n zip_ref.extractall(path)\n elif local_filename.endswith(\".tar.gz\") or local_filename.endswith(\".tgz\"):\n extract_tarfile(local_filename, path, \"r:gz\")\n elif local_filename.endswith(\".tar.bz2\") or local_filename.endswith(\".tbz\"):\n extract_tarfile(local_filename, path, \"r:bz2\")\n\n\nclass FuncModule(nn.Module):\n \"\"\"This class is used to wrap a callable within a nn.Module and apply the wrapped function in `__call__`\"\"\"\n\n def __init__(self, func: Callable) -> None:\n super().__init__()\n self.func = func\n\n def forward(self, *args, **kwargs) -> Any:\n return self.func(*args, **kwargs)\n\n def __str__(self) -> str:\n return f\"{self.__class__.__name__}({self.func.__name__})\"\n\n def __repr__(self):\n return str(self.func)\n\n\ndef convert_to_modules(transforms: Optional[Dict[str, Callable]]):\n if transforms is None or isinstance(transforms, nn.Module):\n return transforms\n\n transforms = apply_to_collection(transforms, Callable, FuncModule, wrong_dtype=nn.Module)\n transforms = apply_to_collection(transforms, Mapping, nn.ModuleDict, wrong_dtype=nn.ModuleDict)\n return apply_to_collection(transforms, Iterable, nn.ModuleList, wrong_dtype=(nn.ModuleList, nn.ModuleDict))\n", "path": "src/flash/core/data/utils.py"}]} | 2,610 | 803 |
gh_patches_debug_21483 | rasdani/github-patches | git_diff | dotkom__onlineweb4-775 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Registering with an invalid email (according to smtplib) causes errors when sending emails
```
SMTPRecipientsRefused at /auth/register/
{u'[email protected]': (501, '5.1.3 Bad recipient address syntax')}

Request Method:     POST
Request URL:        http://moonshine.online.ntnu.no/auth/register/
Django Version:     1.6.1
Exception Type:     SMTPRecipientsRefused
Exception Value:    {u'[email protected]': (501, '5.1.3 Bad recipient address syntax')}
Exception Location: /usr/lib/python2.7/smtplib.py in sendmail, line 733
```
</issue>
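A sketch of the defensive pattern that avoids the 500 error: wrap the send in `try/except smtplib.SMTPException`, of which `SMTPRecipientsRefused` is a subclass. The wrapper name here is hypothetical; it only illustrates the shape of the fix.

```python
from smtplib import SMTPException

from django.conf import settings
from django.core.mail import send_mail

def try_send_mail(subject, message, recipient):
    # Hypothetical wrapper: report the failure instead of letting the view crash.
    try:
        send_mail(subject, message, settings.DEFAULTT_FROM_EMAIL, [recipient])
    except SMTPException as error:
        return False, error
    return True, None
```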
<code>
[start of apps/authentication/views.py]
1 # -*- coding: utf-8 -*-
2
3 import uuid
4 import re
5
6 from django.contrib import auth
7 from django.contrib import messages
8 from django.core.mail import send_mail
9 from django.shortcuts import render, redirect, get_object_or_404
10 from django.http import HttpResponseRedirect
11 from django.utils.translation import ugettext as _
12 from django.views.decorators.debug import sensitive_post_parameters
13
14 from django.conf import settings
15 from apps.authentication.forms import (LoginForm, RegisterForm,
16 RecoveryForm, ChangePasswordForm)
17 from apps.authentication.models import OnlineUser as User, RegisterToken, Email
18
19
20 @sensitive_post_parameters()
21 def login(request):
22 redirect_url = request.REQUEST.get('next', '')
23 if request.method == 'POST':
24 form = LoginForm(request.POST)
25 if form.login(request):
26 messages.success(request, _(u'Du er nå logget inn.'))
27 if redirect_url:
28 return HttpResponseRedirect(redirect_url)
29 return HttpResponseRedirect('/')
30 else: form = LoginForm(request.POST, auto_id=True)
31 else:
32 form = LoginForm()
33
34 response_dict = { 'form' : form, 'next' : redirect_url}
35 return render(request, 'auth/login.html', response_dict)
36
37
38 def logout(request):
39 auth.logout(request)
40 messages.success(request, _(u'Du er nå logget ut.'))
41 return HttpResponseRedirect('/')
42
43
44 @sensitive_post_parameters()
45 def register(request):
46 if request.user.is_authenticated():
47 messages.error(request, _(u'Registrering av ny konto krever at du er logget ut.'))
48 return HttpResponseRedirect('/')
49 else:
50 if request.method == 'POST':
51 form = RegisterForm(request.POST)
52 if form.is_valid():
53 cleaned = form.cleaned_data
54
55 # Create user
56 user = User(
57 username=cleaned['username'],
58 first_name=cleaned['first_name'].title(),
59 last_name=cleaned['last_name'].title(),
60 )
61 # Set remaining fields
62 user.phone_number=cleaned['phone']
63 user.address=cleaned['address'].title()
64 user.zip_code=cleaned['zip_code']
65 # Store password properly
66 user.set_password(cleaned['password'])
67 # Users need to be manually activated
68 user.is_active = False
69 user.save()
70
71 # Set email address
72 email = Email(
73 user=user,
74 email=cleaned['email'].lower(),
75 )
76 email.primary = True
77 email.save()
78
79 # Create the registration token
80 token = uuid.uuid4().hex
81 rt = RegisterToken(user=user, email=cleaned['email'], token=token)
82 rt.save()
83
84 email_message = _(u"""
85 En konto har blitt registrert på online.ntnu.no med denne epostadressen. Dersom du ikke
86 har utført denne handlingen ber vi deg se bort fra denne eposten.
87
88 For å bruke denne kontoen kreves det at du verifiserer epostadressen. Du kan gjøre
89 dette ved å besøke linken under.
90
91 http://%s/auth/verify/%s/
92
93 Denne lenken vil være gyldig i 24 timer. Dersom du behøver å få tilsendt en ny lenke
94 kan dette gjøres med funksjonen for å gjenopprette passord.
95 """) % (request.META['HTTP_HOST'], token)
96
97 send_mail(_(u'Verifiser din konto'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])
98
99 messages.success(request, _(u'Registreringen var vellykket. Se tilsendt epost for verifiseringsinstrukser.'))
100
101 return HttpResponseRedirect('/')
102 else:
103 form = RegisterForm(request.POST, auto_id=True)
104 else:
105 form = RegisterForm()
106
107 return render(request, 'auth/register.html', {'form': form, })
108
109
110 def verify(request, token):
111 rt = get_object_or_404(RegisterToken, token=token)
112
113 if rt.is_valid:
114 email = get_object_or_404(Email, email=rt.email)
115 email.verified = True
116 email.save()
117
118 user = getattr(rt, 'user')
119
120 # If it is a stud email, set the ntnu_username for user
121 if re.match(r'[^@][email protected]', rt.email):
122 user.ntnu_username = rt.email.split("@")[0]
123
124 user_activated = False
125 if not user.is_active:
126 user.is_active = True
127 user_activated = True
128
129 user.save()
130 rt.delete()
131
132 if user_activated:
133 messages.success(request, _(u'Bruker %s ble aktivert. Du kan nå logge inn.') % user.username)
134 return redirect('auth_login')
135 else:
136 messages.success(request, _(u'Eposten %s er nå verifisert.') % email)
137 return redirect('profiles')
138 else:
139 messages.error(request, _(u'Denne lenken er utløpt. Bruk gjenopprett passord for å få tilsendt en ny lenke.'))
140 return HttpResponseRedirect('/')
141
142
143 def recover(request):
144 if request.user.is_authenticated():
145 messages.error(request, _(u'Gjenoppretning av passord krever at du er logget ut.'))
146 return HttpResponseRedirect('/')
147 else:
148 if request.method == 'POST':
149 form = RecoveryForm(request.POST)
150 if form.is_valid():
151 email_string = form.cleaned_data['email']
152 emails = Email.objects.filter(email=email_string)
153
154 if len(emails) == 0:
155 messages.error(request, _(u'Denne eposten er ikke registrert i våre systemer.'))
156 return HttpResponseRedirect('/')
157
158 email = emails[0]
159
160 # Create the registration token
161 token = uuid.uuid4().hex
162 rt = RegisterToken(user=email.user, email=email.email, token=token)
163 rt.save()
164
165 email_message = _(u"""
166 Vi har mottat forespørsel om å gjenopprette passordet for kontoen bundet til %s.
167 Dersom du ikke har bedt om denne handlingen ber vi deg se bort fra denne eposten.
168
169 Brukernavn: %s
170
171 Hvis du ønsker å gjennomføre en gjenoppretning av passord, bruk lenken under.
172
173 http://%s/auth/set_password/%s/
174
175 Denne lenken vil være gyldig i 24 timer. Dersom du behøver å få tilsendt en ny lenke
176 kan dette gjøres med funksjonen for å gjenopprette passord.
177 """) % (email.email, email.user.username, request.META['HTTP_HOST'], token)
178
179 send_mail(_(u'Gjenoppretning av passord'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])
180
181 messages.success(request, _(u'En lenke for gjenoppretning har blitt sendt til %s.') % email.email)
182
183 return HttpResponseRedirect('/')
184 else:
185 form = RecoveryForm(request.POST, auto_id=True)
186 else:
187 form = RecoveryForm()
188
189 return render(request, 'auth/recover.html', {'form': form})
190
191
192 @sensitive_post_parameters()
193 def set_password(request, token=None):
194 if request.user.is_authenticated():
195 return HttpResponseRedirect('/')
196 else:
197 tokens = RegisterToken.objects.filter(token=token)
198
199 if tokens.count() == 1:
200 rt = tokens[0]
201 if rt.is_valid:
202 if request.method == 'POST':
203 form = ChangePasswordForm(request.POST, auto_id=True)
204 if form.is_valid():
205 user = getattr(rt, 'user')
206
207 user.is_active = True
208 user.set_password(form.cleaned_data['new_password'])
209 user.save()
210
211 rt.delete()
212
213 messages.success(request, _(u'Bruker %s har gjennomført vellykket gjenoppretning av passord. Du kan nå logge inn.') % user.username)
214
215 return HttpResponseRedirect('/')
216 else:
217 form = ChangePasswordForm()
218
219 messages.success(request, _(u'Lenken er akseptert. Vennligst skriv inn ønsket passord.'))
220
221 return render(request, 'auth/set_password.html', {'form': form, 'token': token})
222
223 else:
224 messages.error(request, _(u'Lenken er ugyldig. Vennligst bruk gjenoppretning av passord for å få tilsendt en ny lenke.'))
225 return HttpResponseRedirect('/')
226
[end of apps/authentication/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/authentication/views.py b/apps/authentication/views.py
--- a/apps/authentication/views.py
+++ b/apps/authentication/views.py
@@ -2,6 +2,7 @@
import uuid
import re
+from smtplib import SMTPException
from django.contrib import auth
from django.contrib import messages
@@ -93,8 +94,11 @@
Denne lenken vil være gyldig i 24 timer. Dersom du behøver å få tilsendt en ny lenke
kan dette gjøres med funksjonen for å gjenopprette passord.
""") % (request.META['HTTP_HOST'], token)
-
- send_mail(_(u'Verifiser din konto'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])
+ try:
+ send_mail(_(u'Verifiser din konto'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])
+ except SMTPException as e:
+ messages.error(request, u'Det oppstod en kritisk feil, ' + e)
+ return redirect('home')
messages.success(request, _(u'Registreringen var vellykket. Se tilsendt epost for verifiseringsinstrukser.'))
| {"golden_diff": "diff --git a/apps/authentication/views.py b/apps/authentication/views.py\n--- a/apps/authentication/views.py\n+++ b/apps/authentication/views.py\n@@ -2,6 +2,7 @@\n \n import uuid\n import re\n+from smtplib import SMTPException\n \n from django.contrib import auth\n from django.contrib import messages\n@@ -93,8 +94,11 @@\n Denne lenken vil v\u00e6re gyldig i 24 timer. Dersom du beh\u00f8ver \u00e5 f\u00e5 tilsendt en ny lenke\n kan dette gj\u00f8res med funksjonen for \u00e5 gjenopprette passord.\n \"\"\") % (request.META['HTTP_HOST'], token)\n-\n- send_mail(_(u'Verifiser din konto'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])\n+ try:\n+ send_mail(_(u'Verifiser din konto'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])\n+ except SMTPException as e:\n+ messages.error(request, u'Det oppstod en kritisk feil, ' + e)\n+ return redirect('home')\n \n messages.success(request, _(u'Registreringen var vellykket. Se tilsendt epost for verifiseringsinstrukser.'))\n", "issue": "Registering with an invalid email (according to smtplib) causes errors when sending emails\nSMTPRecipientsRefused at /auth/register/\n{u'[email protected]': (501, '5.1.3 Bad recipient address syntax')}\nRequest Method: POST\nRequest URL: http://moonshine.online.ntnu.no/auth/register/\nDjango Version: 1.6.1\nException Type: SMTPRecipientsRefused\nException Value: \n{u'[email protected]': (501, '5.1.3 Bad recipient address syntax')}\nException Location: /usr/lib/python2.7/smtplib.py in sendmail, line 733\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport uuid\nimport re\n\nfrom django.contrib import auth\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponseRedirect\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.debug import sensitive_post_parameters\n\nfrom django.conf import settings\nfrom apps.authentication.forms import (LoginForm, RegisterForm, \n RecoveryForm, ChangePasswordForm)\nfrom apps.authentication.models import OnlineUser as User, RegisterToken, Email\n\n\n@sensitive_post_parameters()\ndef login(request):\n redirect_url = request.REQUEST.get('next', '')\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.login(request):\n messages.success(request, _(u'Du er n\u00e5 logget inn.'))\n if redirect_url:\n return HttpResponseRedirect(redirect_url)\n return HttpResponseRedirect('/')\n else: form = LoginForm(request.POST, auto_id=True)\n else:\n form = LoginForm()\n\n response_dict = { 'form' : form, 'next' : redirect_url}\n return render(request, 'auth/login.html', response_dict)\n\n\ndef logout(request):\n auth.logout(request)\n messages.success(request, _(u'Du er n\u00e5 logget ut.'))\n return HttpResponseRedirect('/')\n\n\n@sensitive_post_parameters()\ndef register(request):\n if request.user.is_authenticated():\n messages.error(request, _(u'Registrering av ny konto krever at du er logget ut.'))\n return HttpResponseRedirect('/')\n else:\n if request.method == 'POST':\n form = RegisterForm(request.POST)\n if form.is_valid():\n cleaned = form.cleaned_data\n\n # Create user\n user = User(\n username=cleaned['username'], \n first_name=cleaned['first_name'].title(), \n last_name=cleaned['last_name'].title(),\n )\n # Set remaining fields\n user.phone_number=cleaned['phone']\n user.address=cleaned['address'].title()\n user.zip_code=cleaned['zip_code']\n # Store password properly\n 
user.set_password(cleaned['password'])\n # Users need to be manually activated\n user.is_active = False\n user.save()\n\n # Set email address\n email = Email(\n user=user,\n email=cleaned['email'].lower(),\n )\n email.primary = True\n email.save() \n\n # Create the registration token\n token = uuid.uuid4().hex\n rt = RegisterToken(user=user, email=cleaned['email'], token=token)\n rt.save()\n\n email_message = _(u\"\"\"\nEn konto har blitt registrert p\u00e5 online.ntnu.no med denne epostadressen. Dersom du ikke\nhar utf\u00f8rt denne handlingen ber vi deg se bort fra denne eposten.\n\nFor \u00e5 bruke denne kontoen kreves det at du verifiserer epostadressen. Du kan gj\u00f8re\ndette ved \u00e5 bes\u00f8ke linken under.\n\nhttp://%s/auth/verify/%s/\n\nDenne lenken vil v\u00e6re gyldig i 24 timer. Dersom du beh\u00f8ver \u00e5 f\u00e5 tilsendt en ny lenke\nkan dette gj\u00f8res med funksjonen for \u00e5 gjenopprette passord.\n\"\"\") % (request.META['HTTP_HOST'], token)\n\n send_mail(_(u'Verifiser din konto'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])\n\n messages.success(request, _(u'Registreringen var vellykket. Se tilsendt epost for verifiseringsinstrukser.'))\n\n return HttpResponseRedirect('/') \n else:\n form = RegisterForm(request.POST, auto_id=True)\n else:\n form = RegisterForm()\n\n return render(request, 'auth/register.html', {'form': form, })\n\n\ndef verify(request, token):\n rt = get_object_or_404(RegisterToken, token=token)\n \n if rt.is_valid:\n email = get_object_or_404(Email, email=rt.email)\n email.verified = True\n email.save()\n \n user = getattr(rt, 'user')\n\n # If it is a stud email, set the ntnu_username for user\n if re.match(r'[^@][email protected]', rt.email):\n user.ntnu_username = rt.email.split(\"@\")[0]\n\n user_activated = False\n if not user.is_active:\n user.is_active = True\n user_activated = True\n\n user.save()\n rt.delete()\n\n if user_activated:\n messages.success(request, _(u'Bruker %s ble aktivert. Du kan n\u00e5 logge inn.') % user.username)\n return redirect('auth_login')\n else:\n messages.success(request, _(u'Eposten %s er n\u00e5 verifisert.') % email)\n return redirect('profiles')\n else:\n messages.error(request, _(u'Denne lenken er utl\u00f8pt. Bruk gjenopprett passord for \u00e5 f\u00e5 tilsendt en ny lenke.'))\n return HttpResponseRedirect('/') \n \n\ndef recover(request):\n if request.user.is_authenticated():\n messages.error(request, _(u'Gjenoppretning av passord krever at du er logget ut.'))\n return HttpResponseRedirect('/')\n else:\n if request.method == 'POST':\n form = RecoveryForm(request.POST)\n if form.is_valid():\n email_string = form.cleaned_data['email']\n emails = Email.objects.filter(email=email_string)\n\n if len(emails) == 0:\n messages.error(request, _(u'Denne eposten er ikke registrert i v\u00e5re systemer.'))\n return HttpResponseRedirect('/') \n\n email = emails[0]\n \n # Create the registration token\n token = uuid.uuid4().hex\n rt = RegisterToken(user=email.user, email=email.email, token=token)\n rt.save()\n\n email_message = _(u\"\"\"\nVi har mottat foresp\u00f8rsel om \u00e5 gjenopprette passordet for kontoen bundet til %s.\nDersom du ikke har bedt om denne handlingen ber vi deg se bort fra denne eposten.\n\nBrukernavn: %s\n\nHvis du \u00f8nsker \u00e5 gjennomf\u00f8re en gjenoppretning av passord, bruk lenken under.\n\nhttp://%s/auth/set_password/%s/\n\nDenne lenken vil v\u00e6re gyldig i 24 timer. 
Dersom du beh\u00f8ver \u00e5 f\u00e5 tilsendt en ny lenke\nkan dette gj\u00f8res med funksjonen for \u00e5 gjenopprette passord.\n\"\"\") % (email.email, email.user.username, request.META['HTTP_HOST'], token)\n\n send_mail(_(u'Gjenoppretning av passord'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email,])\n\n messages.success(request, _(u'En lenke for gjenoppretning har blitt sendt til %s.') % email.email)\n\n return HttpResponseRedirect('/') \n else:\n form = RecoveryForm(request.POST, auto_id=True)\n else:\n form = RecoveryForm()\n\n return render(request, 'auth/recover.html', {'form': form})\n\n\n@sensitive_post_parameters()\ndef set_password(request, token=None): \n if request.user.is_authenticated():\n return HttpResponseRedirect('/')\n else:\n tokens = RegisterToken.objects.filter(token=token)\n \n if tokens.count() == 1:\n rt = tokens[0]\n if rt.is_valid:\n if request.method == 'POST':\n form = ChangePasswordForm(request.POST, auto_id=True)\n if form.is_valid():\n user = getattr(rt, 'user')\n\n user.is_active = True\n user.set_password(form.cleaned_data['new_password'])\n user.save()\n \n rt.delete()\n\n messages.success(request, _(u'Bruker %s har gjennomf\u00f8rt vellykket gjenoppretning av passord. Du kan n\u00e5 logge inn.') % user.username)\n \n return HttpResponseRedirect('/') \n else:\n form = ChangePasswordForm()\n\n messages.success(request, _(u'Lenken er akseptert. Vennligst skriv inn \u00f8nsket passord.'))\n\n return render(request, 'auth/set_password.html', {'form': form, 'token': token})\n\n else:\n messages.error(request, _(u'Lenken er ugyldig. Vennligst bruk gjenoppretning av passord for \u00e5 f\u00e5 tilsendt en ny lenke.'))\n return HttpResponseRedirect('/') \n", "path": "apps/authentication/views.py"}]} | 3,146 | 277 |
gh_patches_debug_24940 | rasdani/github-patches | git_diff | HypothesisWorks__hypothesis-1548 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Log message hiding interferes with pytest log capture fixture.
The log message hiding merged in #1535 interferes with pytest's log capturing.
In the example below I am using the [caplog fixture](https://docs.pytest.org/en/latest/logging.html#caplog-fixture) to capture the log messages and test that they are correct.
In this simple case I am just asserting the number of messages. With 3.69.11 this results in an empty record list on the first call, but it works as expected with 3.69.10.
```python
import logging
from hypothesis import given, strategies
@given(x=strategies.floats(min_value=-3, max_value=3))
def test_logcapture(x, caplog):
with caplog.at_level(logging.CRITICAL, logger='root'):
caplog.clear()
logging.critical(f"x is {x}")
assert len(caplog.records) == 1
```
</issue>
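The mechanism behind the empty `caplog.records` can be shown in isolation: `logging.disable(logging.CRITICAL)` drops every record at CRITICAL and below before any handler — including pytest's capturing handler — ever sees it. A minimal illustration:

```python
import logging

logging.disable(logging.CRITICAL)   # suppress all records up to and including CRITICAL
logging.critical("never reaches any handler, so caplog records nothing")
logging.disable(logging.NOTSET)     # restore normal handling
logging.critical("captured again")
```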
<code>
[start of hypothesis-python/src/hypothesis/control.py]
1 # coding=utf-8
2 #
3 # This file is part of Hypothesis, which may be found at
4 # https://github.com/HypothesisWorks/hypothesis-python
5 #
6 # Most of this work is copyright (C) 2013-2018 David R. MacIver
7 # ([email protected]), but it contains contributions by others. See
8 # CONTRIBUTING.rst for a full list of people who may hold copyright, and
9 # consult the git log if you need to determine who owns an individual
10 # contribution.
11 #
12 # This Source Code Form is subject to the terms of the Mozilla Public License,
13 # v. 2.0. If a copy of the MPL was not distributed with this file, You can
14 # obtain one at http://mozilla.org/MPL/2.0/.
15 #
16 # END HEADER
17
18 from __future__ import division, print_function, absolute_import
19
20 import logging
21 import traceback
22
23 from hypothesis import Verbosity, settings
24 from hypothesis.errors import CleanupFailed, InvalidArgument, \
25 UnsatisfiedAssumption
26 from hypothesis.reporting import report
27 from hypothesis.utils.dynamicvariables import DynamicVariable
28
29 if False:
30 from typing import Any, AnyStr # noqa
31
32
33 def reject():
34 raise UnsatisfiedAssumption()
35
36
37 def assume(condition):
38 # type: (Any) -> bool
39 """Calling ``assume`` is like an :ref:`assert <python:assert>` that marks
40 the example as bad, rather than failing the test.
41
42 This allows you to specify properties that you *assume* will be
43 true, and let Hypothesis try to avoid similar examples in future.
44 """
45 if not condition:
46 raise UnsatisfiedAssumption()
47 return True
48
49
50 _current_build_context = DynamicVariable(None)
51
52
53 def current_build_context():
54 context = _current_build_context.value
55 if context is None:
56 raise InvalidArgument(
57 u'No build context registered')
58 return context
59
60
61 class BuildContext(object):
62
63 def __init__(self, data, is_final=False, close_on_capture=True):
64 self.data = data
65 self.tasks = []
66 self.is_final = is_final
67 self.close_on_capture = close_on_capture
68 self.close_on_del = False
69 self.notes = []
70 self.original_logging_disable = logging.NOTSET
71
72 def __enter__(self):
73 if not self.is_final:
74 self.original_logging_disable = logging.root.manager.disable
75 logging.disable(logging.CRITICAL)
76 self.assign_variable = _current_build_context.with_value(self)
77 self.assign_variable.__enter__()
78 return self
79
80 def __exit__(self, exc_type, exc_value, tb):
81 self.assign_variable.__exit__(exc_type, exc_value, tb)
82 if not self.is_final:
83 logging.disable(self.original_logging_disable)
84 if self.close() and exc_type is None:
85 raise CleanupFailed()
86
87 def local(self):
88 return _current_build_context.with_value(self)
89
90 def close(self):
91 any_failed = False
92 for task in self.tasks:
93 try:
94 task()
95 except BaseException:
96 any_failed = True
97 report(traceback.format_exc())
98 return any_failed
99
100
101 def cleanup(teardown):
102 """Register a function to be called when the current test has finished
103 executing. Any exceptions thrown in teardown will be printed but not
104 rethrown.
105
106 Inside a test this isn't very interesting, because you can just use
107 a finally block, but note that you can use this inside map, flatmap,
108 etc. in order to e.g. insist that a value is closed at the end.
109 """
110 context = _current_build_context.value
111 if context is None:
112 raise InvalidArgument(
113 u'Cannot register cleanup outside of build context')
114 context.tasks.append(teardown)
115
116
117 def note(value):
118 # type: (AnyStr) -> None
119 """Report this value in the final execution."""
120 context = _current_build_context.value
121 if context is None:
122 raise InvalidArgument(
123 'Cannot make notes outside of a test')
124 context.notes.append(value)
125 if context.is_final or settings.default.verbosity >= Verbosity.verbose:
126 report(value)
127
128
129 def event(value):
130 # type: (AnyStr) -> None
131 """Record an event that occurred this test. Statistics on number of test
132 runs with each event will be reported at the end if you run Hypothesis in
133 statistics reporting mode.
134
135 Events should be strings or convertible to them.
136 """
137 context = _current_build_context.value
138 if context is None:
139 raise InvalidArgument(
140 'Cannot make record events outside of a test')
141
142 if context.data is not None:
143 context.data.note_event(value)
144
[end of hypothesis-python/src/hypothesis/control.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hypothesis-python/src/hypothesis/control.py b/hypothesis-python/src/hypothesis/control.py
--- a/hypothesis-python/src/hypothesis/control.py
+++ b/hypothesis-python/src/hypothesis/control.py
@@ -17,7 +17,6 @@
from __future__ import division, print_function, absolute_import
-import logging
import traceback
from hypothesis import Verbosity, settings
@@ -67,20 +66,14 @@
self.close_on_capture = close_on_capture
self.close_on_del = False
self.notes = []
- self.original_logging_disable = logging.NOTSET
def __enter__(self):
- if not self.is_final:
- self.original_logging_disable = logging.root.manager.disable
- logging.disable(logging.CRITICAL)
self.assign_variable = _current_build_context.with_value(self)
self.assign_variable.__enter__()
return self
def __exit__(self, exc_type, exc_value, tb):
self.assign_variable.__exit__(exc_type, exc_value, tb)
- if not self.is_final:
- logging.disable(self.original_logging_disable)
if self.close() and exc_type is None:
raise CleanupFailed()
| {"golden_diff": "diff --git a/hypothesis-python/src/hypothesis/control.py b/hypothesis-python/src/hypothesis/control.py\n--- a/hypothesis-python/src/hypothesis/control.py\n+++ b/hypothesis-python/src/hypothesis/control.py\n@@ -17,7 +17,6 @@\n \n from __future__ import division, print_function, absolute_import\n \n-import logging\n import traceback\n \n from hypothesis import Verbosity, settings\n@@ -67,20 +66,14 @@\n self.close_on_capture = close_on_capture\n self.close_on_del = False\n self.notes = []\n- self.original_logging_disable = logging.NOTSET\n \n def __enter__(self):\n- if not self.is_final:\n- self.original_logging_disable = logging.root.manager.disable\n- logging.disable(logging.CRITICAL)\n self.assign_variable = _current_build_context.with_value(self)\n self.assign_variable.__enter__()\n return self\n \n def __exit__(self, exc_type, exc_value, tb):\n self.assign_variable.__exit__(exc_type, exc_value, tb)\n- if not self.is_final:\n- logging.disable(self.original_logging_disable)\n if self.close() and exc_type is None:\n raise CleanupFailed()\n", "issue": "Log message hiding interfers with pytest log caputure fixture.\nThe log message hiding merged in #1535 interferes with pytests log capturing. \r\n\r\nIn the example below I am using the [caplog fixture](https://docs.pytest.org/en/latest/logging.html#caplog-fixture) to capture the log message and test that they are correct.\r\nIn this simple case just asserting the number of messages. This results in an empty record list in the first call with 3.69.11 but works as expected with 3.69.10\r\n\r\n```python\r\nimport logging\r\nfrom hypothesis import given, strategies\r\n\r\n@given(x=strategies.floats(min_value=-3, max_value=3))\r\ndef test_logcapture(x, caplog):\r\n\r\n with caplog.at_level(logging.CRITICAL, logger='root'):\r\n caplog.clear()\r\n logging.critical(f\"x is {x}\")\r\n assert len(caplog.records) == 1 \r\n```\n", "before_files": [{"content": "# coding=utf-8\n#\n# This file is part of Hypothesis, which may be found at\n# https://github.com/HypothesisWorks/hypothesis-python\n#\n# Most of this work is copyright (C) 2013-2018 David R. MacIver\n# ([email protected]), but it contains contributions by others. See\n# CONTRIBUTING.rst for a full list of people who may hold copyright, and\n# consult the git log if you need to determine who owns an individual\n# contribution.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public License,\n# v. 2.0. 
If a copy of the MPL was not distributed with this file, You can\n# obtain one at http://mozilla.org/MPL/2.0/.\n#\n# END HEADER\n\nfrom __future__ import division, print_function, absolute_import\n\nimport logging\nimport traceback\n\nfrom hypothesis import Verbosity, settings\nfrom hypothesis.errors import CleanupFailed, InvalidArgument, \\\n UnsatisfiedAssumption\nfrom hypothesis.reporting import report\nfrom hypothesis.utils.dynamicvariables import DynamicVariable\n\nif False:\n from typing import Any, AnyStr # noqa\n\n\ndef reject():\n raise UnsatisfiedAssumption()\n\n\ndef assume(condition):\n # type: (Any) -> bool\n \"\"\"Calling ``assume`` is like an :ref:`assert <python:assert>` that marks\n the example as bad, rather than failing the test.\n\n This allows you to specify properties that you *assume* will be\n true, and let Hypothesis try to avoid similar examples in future.\n \"\"\"\n if not condition:\n raise UnsatisfiedAssumption()\n return True\n\n\n_current_build_context = DynamicVariable(None)\n\n\ndef current_build_context():\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n u'No build context registered')\n return context\n\n\nclass BuildContext(object):\n\n def __init__(self, data, is_final=False, close_on_capture=True):\n self.data = data\n self.tasks = []\n self.is_final = is_final\n self.close_on_capture = close_on_capture\n self.close_on_del = False\n self.notes = []\n self.original_logging_disable = logging.NOTSET\n\n def __enter__(self):\n if not self.is_final:\n self.original_logging_disable = logging.root.manager.disable\n logging.disable(logging.CRITICAL)\n self.assign_variable = _current_build_context.with_value(self)\n self.assign_variable.__enter__()\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n self.assign_variable.__exit__(exc_type, exc_value, tb)\n if not self.is_final:\n logging.disable(self.original_logging_disable)\n if self.close() and exc_type is None:\n raise CleanupFailed()\n\n def local(self):\n return _current_build_context.with_value(self)\n\n def close(self):\n any_failed = False\n for task in self.tasks:\n try:\n task()\n except BaseException:\n any_failed = True\n report(traceback.format_exc())\n return any_failed\n\n\ndef cleanup(teardown):\n \"\"\"Register a function to be called when the current test has finished\n executing. Any exceptions thrown in teardown will be printed but not\n rethrown.\n\n Inside a test this isn't very interesting, because you can just use\n a finally block, but note that you can use this inside map, flatmap,\n etc. in order to e.g. insist that a value is closed at the end.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n u'Cannot register cleanup outside of build context')\n context.tasks.append(teardown)\n\n\ndef note(value):\n # type: (AnyStr) -> None\n \"\"\"Report this value in the final execution.\"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n 'Cannot make notes outside of a test')\n context.notes.append(value)\n if context.is_final or settings.default.verbosity >= Verbosity.verbose:\n report(value)\n\n\ndef event(value):\n # type: (AnyStr) -> None\n \"\"\"Record an event that occurred this test. 
Statistics on number of test\n runs with each event will be reported at the end if you run Hypothesis in\n statistics reporting mode.\n\n Events should be strings or convertible to them.\n \"\"\"\n context = _current_build_context.value\n if context is None:\n raise InvalidArgument(\n 'Cannot make record events outside of a test')\n\n if context.data is not None:\n context.data.note_event(value)\n", "path": "hypothesis-python/src/hypothesis/control.py"}]} | 2,078 | 262 |
gh_patches_debug_10841 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-435 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Manual creation of Languages results in Bugsplash at page view.
### Describe the Bug
If a system is set up manually without the test data, the manual language setup results in an error when opening the list view of the pages.

### Steps to Reproduce
1. Create Languages (Like German with code DE_de)
2. Create Region (Like Berlin)
3. Add Language Tree model like German -> English
4. Click on Pages and see error
### Expected Behavior
The usual list should be displayed.
### Actual Behavior
Error message from Django.
### Additional Information
I guess this has something to do with manually setting the language code, which then can't be matched by Django.
</issue>
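As additional context for the failure described above: Django's `Manager.get()` raises `Language.DoesNotExist` when no row matches, which would explain the crash if a language-tree entry references a code with no corresponding `Language` row. The sketch below is illustrative only; it reuses the model and field names from the template-tag module shown next and demonstrates the defensive `filter(...).first()` pattern that avoids the exception.

```python
from ..models import Language  # same import as in the template-tag module below


def translated_language_name(language_code):
    # .filter(...).first() returns None instead of raising Language.DoesNotExist
    # when the given code has no matching Language row.
    language = Language.objects.filter(code=language_code).first()
    return language.translated_name if language else ''
```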
<code>
[start of src/cms/templatetags/content_filters.py]
1 import logging
2
3 from django import template
4
5 from ..models import Language
6
7 logger = logging.getLogger(__name__)
8 register = template.Library()
9
10
11 @register.simple_tag
12 def get_translation(instance, language_code):
13 return instance.translations.filter(language__code=language_code).first()
14
15
16 @register.simple_tag
17 def translated_language_name(language_code):
18 return Language.objects.get(code=language_code).translated_name
19
20 @register.simple_tag
21 def get_language(language_code):
22 return Language.objects.get(code=language_code)
23
24 # Unify the language codes of backend and content languages
25 @register.simple_tag
26 def unify_language_code(language_code):
27 if language_code == 'en-gb':
28 return 'en-us'
29 return language_code
30
31
32 @register.filter
33 def get_int_list(data, list_name):
34 return [int(item) for item in data.getlist(list_name)]
35
36
37 @register.filter
38 def is_empty(iterable):
39 return not bool(iterable)
40
[end of src/cms/templatetags/content_filters.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cms/templatetags/content_filters.py b/src/cms/templatetags/content_filters.py
--- a/src/cms/templatetags/content_filters.py
+++ b/src/cms/templatetags/content_filters.py
@@ -15,11 +15,14 @@
@register.simple_tag
def translated_language_name(language_code):
- return Language.objects.get(code=language_code).translated_name
+ language = Language.objects.filter(code=language_code)
+ if language.exists():
+ return language.first().translated_name
+ return ''
@register.simple_tag
def get_language(language_code):
- return Language.objects.get(code=language_code)
+ return Language.objects.filter(code=language_code).first()
# Unify the language codes of backend and content languages
@register.simple_tag
| {"golden_diff": "diff --git a/src/cms/templatetags/content_filters.py b/src/cms/templatetags/content_filters.py\n--- a/src/cms/templatetags/content_filters.py\n+++ b/src/cms/templatetags/content_filters.py\n@@ -15,11 +15,14 @@\n \n @register.simple_tag\n def translated_language_name(language_code):\n- return Language.objects.get(code=language_code).translated_name\n+ language = Language.objects.filter(code=language_code)\n+ if language.exists():\n+ return language.first().translated_name\n+ return ''\n \n @register.simple_tag\n def get_language(language_code):\n- return Language.objects.get(code=language_code)\n+ return Language.objects.filter(code=language_code).first()\n \n # Unify the language codes of backend and content languages\n @register.simple_tag\n", "issue": "Manual creation of Languages results in Bugsplash at page view.\n### Describe the Bug\r\nIf a system is set up manually without the test data. The manual language setup results in a error when opening the list view of the pages.\r\n\r\n\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create Languages (Like German with code DE_de)\r\n2. Create Region (Like Berlin)\r\n3. Add Language Tree model like German -> English\r\n4. Click on Pages and see error\r\n\r\n### Expected Behavior\r\nThe usual list should be displayed. \r\n\r\n\r\n### Actual Behavior\r\nError message from Django.\r\n\r\n\r\n### Additional Information\r\nI guess this has something to do with manually setting the language code and this can't be matched by django.\r\n\r\n\nManual creation of Languages results in Bugsplash at page view.\n### Describe the Bug\r\nIf a system is set up manually without the test data. The manual language setup results in a error when opening the list view of the pages.\r\n\r\n\r\n\r\n\r\n### Steps to Reproduce\r\n\r\n1. Create Languages (Like German with code DE_de)\r\n2. Create Region (Like Berlin)\r\n3. Add Language Tree model like German -> English\r\n4. Click on Pages and see error\r\n\r\n### Expected Behavior\r\nThe usual list should be displayed. \r\n\r\n\r\n### Actual Behavior\r\nError message from Django.\r\n\r\n\r\n### Additional Information\r\nI guess this has something to do with manually setting the language code and this can't be matched by django.\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom django import template\n\nfrom ..models import Language\n\nlogger = logging.getLogger(__name__)\nregister = template.Library()\n\n\[email protected]_tag\ndef get_translation(instance, language_code):\n return instance.translations.filter(language__code=language_code).first()\n\n\[email protected]_tag\ndef translated_language_name(language_code):\n return Language.objects.get(code=language_code).translated_name\n\[email protected]_tag\ndef get_language(language_code):\n return Language.objects.get(code=language_code)\n\n# Unify the language codes of backend and content languages\[email protected]_tag\ndef unify_language_code(language_code):\n if language_code == 'en-gb':\n return 'en-us'\n return language_code\n\n\[email protected]\ndef get_int_list(data, list_name):\n return [int(item) for item in data.getlist(list_name)]\n\n\[email protected]\ndef is_empty(iterable):\n return not bool(iterable)\n", "path": "src/cms/templatetags/content_filters.py"}]} | 1,230 | 178 |
gh_patches_debug_229 | rasdani/github-patches | git_diff | facebookresearch__hydra-1808 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] hydra-optuna-sweeper 1.1.0 requires numpy<1.20.0
# 🐛 Bug
## Description
<!-- A clear and concise description of what the bug is. -->
I used the guide from
https://hydra.cc/docs/plugins/optuna_sweeper/
And install hydra-optuna-sweeper:
```bash
pip install hydra-optuna-sweeper --upgrade
```
But it seems this plugin requires numpy<1.20.0:

**Edit:**
I searched for optuna's requirements and found this:
https://github.com/optuna/optuna/blob/cbae80476c15b6d39e1d8851dc6a501c63c3ca92/setup.py#L35
Why does hydra-optuna-sweeper need to pin numpy<1.20.0?
</issue>
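One quick way to see which distribution actually carries the `numpy<1.20.0` pin is to inspect the declared requirements of the installed packages. This is only a diagnostic sketch (the package names are the ones mentioned in the issue) using the standard-library `importlib.metadata`:

```python
# Diagnostic sketch: print the declared dependencies of the packages named in
# the issue to see where the numpy pin actually comes from.
from importlib.metadata import PackageNotFoundError, requires

for dist in ("hydra-optuna-sweeper", "optuna"):
    try:
        print(dist, "->", requires(dist))
    except PackageNotFoundError:
        print(dist, "is not installed")
```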
<code>
[start of plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
3 __version__ = "1.1.0"
4
[end of plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py
--- a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py
+++ b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py
@@ -1,3 +1,3 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-__version__ = "1.1.0"
+__version__ = "1.1.1"
| {"golden_diff": "diff --git a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py\n--- a/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py\n+++ b/plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py\n@@ -1,3 +1,3 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n \n-__version__ = \"1.1.0\"\n+__version__ = \"1.1.1\"\n", "issue": "[Bug] hydra-optuna-sweeper 1.1.0 requires numpy<1.20.0\n# \ud83d\udc1b Bug\r\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nI used the guide from\r\nhttps://hydra.cc/docs/plugins/optuna_sweeper/\r\n\r\nAnd install hydra-optuna-sweeper:\r\n\r\n```bash \r\npip install hydra-optuna-sweeper --upgrade\r\n```\r\n\r\nBut it seems this plugin requires numpy<1.20.0:\r\n\r\n\r\n\r\n**Edit:**\r\n\r\nI searched for optuna's requirements, found this:\r\n\r\nhttps://github.com/optuna/optuna/blob/cbae80476c15b6d39e1d8851dc6a501c63c3ca92/setup.py#L35\r\n\r\nWhy hydra-optuna-sweeper need to use numpy<1.20.0?\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\n__version__ = \"1.1.0\"\n", "path": "plugins/hydra_optuna_sweeper/hydra_plugins/hydra_optuna_sweeper/__init__.py"}]} | 873 | 163 |
gh_patches_debug_43516 | rasdani/github-patches | git_diff | ResonantGeoData__ResonantGeoData-577 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Client: API token saving bug on MacOS
@banesullivan was experiencing the following issue:
When calling `create_rgd_client`, if there isn't already a token stored locally (in `$HOME/.rgd/token`), the client is supposed to make a request to the server to create it and save it in that file. On macOS, this does not seem to happen: the file never appears to be created (notably, though, the `.rgd` folder _is_ present). Furthermore, if you manually populate that file with your token, the token is read correctly, but the file is gone afterwards.
This doesn't actually affect authorization, as it still just fetches the token from the API and stores it in memory, but the storage issue needs to be looked into.
</issue>
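A quick way to observe the reported behaviour is to check whether the token file exists on disk before and after constructing a client. The path is the one named in the issue; the client call itself is left commented out because the exact constructor arguments depend on the deployment and are an assumption here:

```python
# Diagnostic sketch: does the API key file survive a client call?
from pathlib import Path

token_path = Path.home() / ".rgd" / "token"
print("token file exists before:", token_path.exists())
# client = create_rgd_client(api_url=..., username=..., password=...)  # hypothetical call
print("token file exists after:", token_path.exists())
```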
<code>
[start of django-rgd/client/rgd_client/client.py]
1 import getpass
2 import os
3 from typing import List, Optional, Type
4
5 import requests
6
7 from .plugin import CorePlugin
8 from .session import RgdClientSession, clone_session
9 from .utils import API_KEY_DIR_PATH, API_KEY_FILE_NAME, DEFAULT_RGD_API
10
11
12 class RgdClient:
13 def __init__(
14 self,
15 api_url: str = DEFAULT_RGD_API,
16 username: Optional[str] = None,
17 password: Optional[str] = None,
18 save: Optional[bool] = True,
19 ) -> None:
20 """
21 Initialize the base RGD Client.
22
23 Args:
24 api_url: The base url of the RGD API instance.
25 username: The username to authenticate to the instance with, if any.
26 password: The password associated with the provided username. If None, a prompt will be provided.
27 save: Whether or not to save the logged-in user's API key to disk for future use.
28
29 Returns:
30 A base RgdClient instance.
31 """
32 # Look for an API key in the environment. If it's not there, check username/password
33 api_key = _read_api_key()
34 if api_key is None:
35 if username is not None and password is None:
36 password = getpass.getpass()
37
38 # Get an API key for this user and save it to disk
39 if username and password:
40 api_key = _get_api_key(api_url, username, password, save)
41
42 auth_header = f'Token {api_key}'
43
44 self.session = RgdClientSession(base_url=api_url, auth_header=auth_header)
45 self.rgd = CorePlugin(clone_session(self.session))
46
47 def clear_token(self):
48 """Delete a locally-stored API key."""
49 (API_KEY_DIR_PATH / API_KEY_FILE_NAME).unlink(missing_ok=True)
50
51
52 def _get_api_key(api_url: str, username: str, password: str, save: bool) -> str:
53 """Get an RGD API Key for the given user from the server, and save it if requested."""
54 resp = requests.post(f'{api_url}/api-token-auth', {'username': username, 'password': password})
55 resp.raise_for_status()
56 token = resp.json()['token']
57 if save:
58 API_KEY_DIR_PATH.mkdir(parents=True, exist_ok=True)
59 with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'w') as fd:
60 fd.write(token)
61 return token
62
63
64 def _read_api_key() -> Optional[str]:
65 """
66 Retrieve an RGD API Key from the users environment.
67
68 This function checks for an environment variable named RGD_API_TOKEN and returns it if it exists.
69 If it does not exist, it looks for a file located at ~/.rgd/token and returns its contents.
70 """
71 token = os.getenv('RGD_API_TOKEN', None)
72 if token is not None:
73 return token
74
75 try:
76 # read the first line of the text file at ~/.rgd/token
77 with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:
78 return fd.readline().strip()
79 except FileNotFoundError:
80 return None
81
82
83 def create_rgd_client(
84 api_url: str = DEFAULT_RGD_API,
85 username: Optional[str] = None,
86 password: Optional[str] = None,
87 save: Optional[bool] = True,
88 extra_plugins: Optional[List[Type]] = None,
89 ):
90 # Avoid circular import
91 from ._plugin_utils import _inject_plugin_deps, _plugin_classes, _plugin_instances
92
93 # Create initial client
94 client = RgdClient(api_url, username, password, save)
95
96 # Perform plugin initialization
97 plugin_classes = _plugin_classes(extra_plugins=extra_plugins)
98 plugin_instances = _plugin_instances(client, plugin_classes)
99 _inject_plugin_deps(plugin_instances)
100
101 return client
102
[end of django-rgd/client/rgd_client/client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django-rgd/client/rgd_client/client.py b/django-rgd/client/rgd_client/client.py
--- a/django-rgd/client/rgd_client/client.py
+++ b/django-rgd/client/rgd_client/client.py
@@ -1,4 +1,5 @@
import getpass
+import logging
import os
from typing import List, Optional, Type
@@ -8,6 +9,8 @@
from .session import RgdClientSession, clone_session
from .utils import API_KEY_DIR_PATH, API_KEY_FILE_NAME, DEFAULT_RGD_API
+logger = logging.getLogger(__name__)
+
class RgdClient:
def __init__(
@@ -30,7 +33,7 @@
A base RgdClient instance.
"""
# Look for an API key in the environment. If it's not there, check username/password
- api_key = _read_api_key()
+ api_key = _read_api_key(api_url=api_url, username=username, password=password)
if api_key is None:
if username is not None and password is None:
password = getpass.getpass()
@@ -38,6 +41,10 @@
# Get an API key for this user and save it to disk
if username and password:
api_key = _get_api_key(api_url, username, password, save)
+ if api_key is None:
+ logger.error(
+ 'Failed to retrieve API key; are your username and password correct?'
+ )
auth_header = f'Token {api_key}'
@@ -49,11 +56,12 @@
(API_KEY_DIR_PATH / API_KEY_FILE_NAME).unlink(missing_ok=True)
-def _get_api_key(api_url: str, username: str, password: str, save: bool) -> str:
+def _get_api_key(api_url: str, username: str, password: str, save: bool) -> Optional[str]:
"""Get an RGD API Key for the given user from the server, and save it if requested."""
resp = requests.post(f'{api_url}/api-token-auth', {'username': username, 'password': password})
- resp.raise_for_status()
- token = resp.json()['token']
+ token = resp.json().get('token')
+ if token is None:
+ return None
if save:
API_KEY_DIR_PATH.mkdir(parents=True, exist_ok=True)
with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'w') as fd:
@@ -61,7 +69,7 @@
return token
-def _read_api_key() -> Optional[str]:
+def _read_api_key(api_url: str, username: str = None, password: str = None) -> Optional[str]:
"""
Retrieve an RGD API Key from the users environment.
@@ -75,10 +83,29 @@
try:
# read the first line of the text file at ~/.rgd/token
with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:
- return fd.readline().strip()
+ api_key = fd.readline().strip()
except FileNotFoundError:
return None
+ # Make sure API key works by hitting a protected endpoint
+ resp = requests.get(f'{api_url}/rgd/collection', headers={'Authorization': f'Token {api_key}'})
+
+ # If it doesn't, try to get a new one and save it to ~/.rgd/token, as the current one is corrupted
+ if resp.status_code == 401:
+ logger.error('API key is invalid.')
+ # If username + password were provided, try to get a new API key with them
+ if username is not None and password is not None:
+ logger.warning('Attempting to fetch a new API key...')
+ api_key = _get_api_key(api_url, username, password, save=True)
+ if api_key is not None:
+ logger.warning('Succeeded.')
+ return api_key
+ else:
+ logger.error('Provide your username and password next time to fetch a new one.')
+ return None
+
+ return api_key
+
def create_rgd_client(
api_url: str = DEFAULT_RGD_API,
| {"golden_diff": "diff --git a/django-rgd/client/rgd_client/client.py b/django-rgd/client/rgd_client/client.py\n--- a/django-rgd/client/rgd_client/client.py\n+++ b/django-rgd/client/rgd_client/client.py\n@@ -1,4 +1,5 @@\n import getpass\n+import logging\n import os\n from typing import List, Optional, Type\n \n@@ -8,6 +9,8 @@\n from .session import RgdClientSession, clone_session\n from .utils import API_KEY_DIR_PATH, API_KEY_FILE_NAME, DEFAULT_RGD_API\n \n+logger = logging.getLogger(__name__)\n+\n \n class RgdClient:\n def __init__(\n@@ -30,7 +33,7 @@\n A base RgdClient instance.\n \"\"\"\n # Look for an API key in the environment. If it's not there, check username/password\n- api_key = _read_api_key()\n+ api_key = _read_api_key(api_url=api_url, username=username, password=password)\n if api_key is None:\n if username is not None and password is None:\n password = getpass.getpass()\n@@ -38,6 +41,10 @@\n # Get an API key for this user and save it to disk\n if username and password:\n api_key = _get_api_key(api_url, username, password, save)\n+ if api_key is None:\n+ logger.error(\n+ 'Failed to retrieve API key; are your username and password correct?'\n+ )\n \n auth_header = f'Token {api_key}'\n \n@@ -49,11 +56,12 @@\n (API_KEY_DIR_PATH / API_KEY_FILE_NAME).unlink(missing_ok=True)\n \n \n-def _get_api_key(api_url: str, username: str, password: str, save: bool) -> str:\n+def _get_api_key(api_url: str, username: str, password: str, save: bool) -> Optional[str]:\n \"\"\"Get an RGD API Key for the given user from the server, and save it if requested.\"\"\"\n resp = requests.post(f'{api_url}/api-token-auth', {'username': username, 'password': password})\n- resp.raise_for_status()\n- token = resp.json()['token']\n+ token = resp.json().get('token')\n+ if token is None:\n+ return None\n if save:\n API_KEY_DIR_PATH.mkdir(parents=True, exist_ok=True)\n with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'w') as fd:\n@@ -61,7 +69,7 @@\n return token\n \n \n-def _read_api_key() -> Optional[str]:\n+def _read_api_key(api_url: str, username: str = None, password: str = None) -> Optional[str]:\n \"\"\"\n Retrieve an RGD API Key from the users environment.\n \n@@ -75,10 +83,29 @@\n try:\n # read the first line of the text file at ~/.rgd/token\n with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:\n- return fd.readline().strip()\n+ api_key = fd.readline().strip()\n except FileNotFoundError:\n return None\n \n+ # Make sure API key works by hitting a protected endpoint\n+ resp = requests.get(f'{api_url}/rgd/collection', headers={'Authorization': f'Token {api_key}'})\n+\n+ # If it doesn't, try to get a new one and save it to ~/.rgd/token, as the current one is corrupted\n+ if resp.status_code == 401:\n+ logger.error('API key is invalid.')\n+ # If username + password were provided, try to get a new API key with them\n+ if username is not None and password is not None:\n+ logger.warning('Attempting to fetch a new API key...')\n+ api_key = _get_api_key(api_url, username, password, save=True)\n+ if api_key is not None:\n+ logger.warning('Succeeded.')\n+ return api_key\n+ else:\n+ logger.error('Provide your username and password next time to fetch a new one.')\n+ return None\n+\n+ return api_key\n+\n \n def create_rgd_client(\n api_url: str = DEFAULT_RGD_API,\n", "issue": "Client: API token saving bug on MacOS\n@banesullivan was experiencing the following issue:\r\n\r\nWhen calling `create_rgd_client`, if there isn't already a token stored locally (in `$HOME/.rgd/token`), the client is supposed to make a 
request to the server to create it, and save it in that file. On MacOS, it seems this might not be occurring. The file doesn't appear to ever be created (notably though, the `.rgd` folder _is_ present). Furthermore, if you try to manually populate that file with your token, it will correctly read it, but the file will then be gone afterwards.\r\n\r\nThis doesn't actually affect authorization, as it still just fetches the token from the API and stores it in memory, but the storage issue needs to be looked into.\n", "before_files": [{"content": "import getpass\nimport os\nfrom typing import List, Optional, Type\n\nimport requests\n\nfrom .plugin import CorePlugin\nfrom .session import RgdClientSession, clone_session\nfrom .utils import API_KEY_DIR_PATH, API_KEY_FILE_NAME, DEFAULT_RGD_API\n\n\nclass RgdClient:\n def __init__(\n self,\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n save: Optional[bool] = True,\n ) -> None:\n \"\"\"\n Initialize the base RGD Client.\n\n Args:\n api_url: The base url of the RGD API instance.\n username: The username to authenticate to the instance with, if any.\n password: The password associated with the provided username. If None, a prompt will be provided.\n save: Whether or not to save the logged-in user's API key to disk for future use.\n\n Returns:\n A base RgdClient instance.\n \"\"\"\n # Look for an API key in the environment. If it's not there, check username/password\n api_key = _read_api_key()\n if api_key is None:\n if username is not None and password is None:\n password = getpass.getpass()\n\n # Get an API key for this user and save it to disk\n if username and password:\n api_key = _get_api_key(api_url, username, password, save)\n\n auth_header = f'Token {api_key}'\n\n self.session = RgdClientSession(base_url=api_url, auth_header=auth_header)\n self.rgd = CorePlugin(clone_session(self.session))\n\n def clear_token(self):\n \"\"\"Delete a locally-stored API key.\"\"\"\n (API_KEY_DIR_PATH / API_KEY_FILE_NAME).unlink(missing_ok=True)\n\n\ndef _get_api_key(api_url: str, username: str, password: str, save: bool) -> str:\n \"\"\"Get an RGD API Key for the given user from the server, and save it if requested.\"\"\"\n resp = requests.post(f'{api_url}/api-token-auth', {'username': username, 'password': password})\n resp.raise_for_status()\n token = resp.json()['token']\n if save:\n API_KEY_DIR_PATH.mkdir(parents=True, exist_ok=True)\n with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'w') as fd:\n fd.write(token)\n return token\n\n\ndef _read_api_key() -> Optional[str]:\n \"\"\"\n Retrieve an RGD API Key from the users environment.\n\n This function checks for an environment variable named RGD_API_TOKEN and returns it if it exists.\n If it does not exist, it looks for a file located at ~/.rgd/token and returns its contents.\n \"\"\"\n token = os.getenv('RGD_API_TOKEN', None)\n if token is not None:\n return token\n\n try:\n # read the first line of the text file at ~/.rgd/token\n with open(API_KEY_DIR_PATH / API_KEY_FILE_NAME, 'r') as fd:\n return fd.readline().strip()\n except FileNotFoundError:\n return None\n\n\ndef create_rgd_client(\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n save: Optional[bool] = True,\n extra_plugins: Optional[List[Type]] = None,\n):\n # Avoid circular import\n from ._plugin_utils import _inject_plugin_deps, _plugin_classes, _plugin_instances\n\n # Create initial client\n client = RgdClient(api_url, username, password, 
save)\n\n # Perform plugin initialization\n plugin_classes = _plugin_classes(extra_plugins=extra_plugins)\n plugin_instances = _plugin_instances(client, plugin_classes)\n _inject_plugin_deps(plugin_instances)\n\n return client\n", "path": "django-rgd/client/rgd_client/client.py"}]} | 1,738 | 938 |
gh_patches_debug_36852 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-961 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Python == 2.7
The read me states that
># Deprecated Python Versions
>Python == 2.7
It does not appear to work with Python == 2.7. (link to question on [SO](https://stackoverflow.com/q/56731694/1841839))
Should this note be removed from the [readme](https://github.com/googleapis/google-api-python-client/edit/master/README.md)? To me, deprecation means that it should still work and just be removed soon.
</issue>
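Given that the report is about the library no longer importing on Python 2.7, one plausible direction is a fail-fast interpreter-version guard in `setup.py`. The 3.6 floor below is an assumption for illustration, not the project's confirmed minimum:

```python
# Sketch of a fail-fast version guard for setup.py (assumed 3.6 minimum).
import sys

if sys.version_info < (3, 6):
    sys.stderr.write("google-api-python-client requires Python >= 3.6.\n")
    sys.exit(1)
```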
<code>
[start of noxfile.py]
1
2 # Copyright 2020 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import sys
17
18 import nox
19
20 test_dependencies = [
21 "google-auth",
22 "google-auth-httplib2",
23 "mox",
24 "parameterized",
25 "pyopenssl",
26 "pytest",
27 "pytest-cov",
28 "webtest",
29 "coverage",
30 "unittest2",
31 "mock",
32 ]
33
34
35 @nox.session(python=["3.7"])
36 def lint(session):
37 session.install("flake8")
38 session.run(
39 "flake8",
40 "googleapiclient",
41 "tests",
42 "--count",
43 "--select=E9,F63,F7,F82",
44 "--show-source",
45 "--statistics",
46 )
47
48
49 @nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"])
50 @nox.parametrize(
51 "oauth2client",
52 [
53 "oauth2client<2dev",
54 "oauth2client>=2,<=3dev",
55 "oauth2client>=3,<=4dev",
56 "oauth2client>=4,<=5dev",
57 ],
58 )
59 def unit(session, oauth2client):
60 session.install(*test_dependencies)
61 session.install(oauth2client)
62 if session.python < "3.0":
63 session.install("django<2.0.0")
64 else:
65 session.install("django>=2.0.0")
66
67 session.install('.')
68
69 # Run py.test against the unit tests.
70 session.run(
71 "py.test",
72 "--quiet",
73 "--cov=googleapiclient",
74 "--cov=tests",
75 "--cov-append",
76 "--cov-config=.coveragerc",
77 "--cov-report=",
78 "--cov-fail-under=85",
79 "tests",
80 *session.posargs,
81 )
82
83
84 @nox.session(python="3.6")
85 def docs(session):
86 session.install('.')
87 session.run("python", "describe.py")
[end of noxfile.py]
[start of setup.py]
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Setup script for Google API Python client.
16
17 Also installs included versions of third party libraries, if those libraries
18 are not already installed.
19 """
20 from __future__ import print_function
21
22 import sys
23
24 if sys.version_info < (2, 7):
25 print("google-api-python-client requires python version >= 2.7.", file=sys.stderr)
26 sys.exit(1)
27 if (3, 1) <= sys.version_info < (3, 4):
28 print("google-api-python-client requires python3 version >= 3.4.", file=sys.stderr)
29 sys.exit(1)
30
31 import io
32 import os
33 from setuptools import setup
34
35 packages = ["apiclient", "googleapiclient", "googleapiclient/discovery_cache"]
36
37 install_requires = [
38 "httplib2>=0.15.0,<1dev",
39 "google-auth>=1.16.0",
40 "google-auth-httplib2>=0.0.3",
41 "google-api-core>=1.21.0,<2dev",
42 "six>=1.13.0,<2dev",
43 "uritemplate>=3.0.0,<4dev",
44 ]
45
46 package_root = os.path.abspath(os.path.dirname(__file__))
47
48 readme_filename = os.path.join(package_root, "README.md")
49 with io.open(readme_filename, encoding="utf-8") as readme_file:
50 readme = readme_file.read()
51
52 version = "1.12.8"
53
54 setup(
55 name="google-api-python-client",
56 version=version,
57 description="Google API Client Library for Python",
58 long_description=readme,
59 long_description_content_type='text/markdown',
60 author="Google LLC",
61 author_email="[email protected]",
62 url="https://github.com/googleapis/google-api-python-client/",
63 install_requires=install_requires,
64 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
65 packages=packages,
66 package_data={},
67 license="Apache 2.0",
68 keywords="google api client",
69 classifiers=[
70 "Programming Language :: Python :: 2",
71 "Programming Language :: Python :: 2.7",
72 "Programming Language :: Python :: 3",
73 "Programming Language :: Python :: 3.5",
74 "Programming Language :: Python :: 3.6",
75 "Programming Language :: Python :: 3.7",
76 "Development Status :: 5 - Production/Stable",
77 "Intended Audience :: Developers",
78 "License :: OSI Approved :: Apache Software License",
79 "Operating System :: OS Independent",
80 "Topic :: Internet :: WWW/HTTP",
81 ],
82 )
83
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/noxfile.py b/noxfile.py
--- a/noxfile.py
+++ b/noxfile.py
@@ -18,6 +18,7 @@
import nox
test_dependencies = [
+ "django>=2.0.0",
"google-auth",
"google-auth-httplib2",
"mox",
@@ -46,7 +47,7 @@
)
[email protected](python=["2.7", "3.5", "3.6", "3.7", "3.8"])
[email protected](python=["3.6", "3.7", "3.8", "3.9"])
@nox.parametrize(
"oauth2client",
[
@@ -59,11 +60,6 @@
def unit(session, oauth2client):
session.install(*test_dependencies)
session.install(oauth2client)
- if session.python < "3.0":
- session.install("django<2.0.0")
- else:
- session.install("django>=2.0.0")
-
session.install('.')
# Run py.test against the unit tests.
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,11 +21,8 @@
import sys
-if sys.version_info < (2, 7):
- print("google-api-python-client requires python version >= 2.7.", file=sys.stderr)
- sys.exit(1)
-if (3, 1) <= sys.version_info < (3, 4):
- print("google-api-python-client requires python3 version >= 3.4.", file=sys.stderr)
+if sys.version_info < (3, 6):
+ print("google-api-python-client requires python3 version >= 3.6.", file=sys.stderr)
sys.exit(1)
import io
@@ -61,18 +58,17 @@
author_email="[email protected]",
url="https://github.com/googleapis/google-api-python-client/",
install_requires=install_requires,
- python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
+ python_requires=">=3.6",
packages=packages,
package_data={},
license="Apache 2.0",
keywords="google api client",
classifiers=[
- "Programming Language :: Python :: 2",
- "Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
| {"golden_diff": "diff --git a/noxfile.py b/noxfile.py\n--- a/noxfile.py\n+++ b/noxfile.py\n@@ -18,6 +18,7 @@\n import nox\n \n test_dependencies = [\n+ \"django>=2.0.0\",\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n@@ -46,7 +47,7 @@\n )\n \n \[email protected](python=[\"2.7\", \"3.5\", \"3.6\", \"3.7\", \"3.8\"])\[email protected](python=[\"3.6\", \"3.7\", \"3.8\", \"3.9\"])\n @nox.parametrize(\n \"oauth2client\",\n [\n@@ -59,11 +60,6 @@\n def unit(session, oauth2client):\n session.install(*test_dependencies)\n session.install(oauth2client)\n- if session.python < \"3.0\":\n- session.install(\"django<2.0.0\")\n- else:\n- session.install(\"django>=2.0.0\")\n-\n session.install('.')\n \n # Run py.test against the unit tests.\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,11 +21,8 @@\n \n import sys\n \n-if sys.version_info < (2, 7):\n- print(\"google-api-python-client requires python version >= 2.7.\", file=sys.stderr)\n- sys.exit(1)\n-if (3, 1) <= sys.version_info < (3, 4):\n- print(\"google-api-python-client requires python3 version >= 3.4.\", file=sys.stderr)\n+if sys.version_info < (3, 6):\n+ print(\"google-api-python-client requires python3 version >= 3.6.\", file=sys.stderr)\n sys.exit(1)\n \n import io\n@@ -61,18 +58,17 @@\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n- python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n+ python_requires=\">=3.6\",\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n- \"Programming Language :: Python :: 2\",\n- \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n- \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n", "issue": "Python == 2.7\nThe read me states that\r\n\r\n># Deprecated Python Versions\r\n>Python == 2.7\r\n\r\nIt does not appear to work with Python == 2.7. (link to question on [SO](https://stackoverflow.com/q/56731694/1841839))\r\n\r\nShould this note be removed from the [readme](https://github.com/googleapis/google-api-python-client/edit/master/README.md)? 
to me deprecation means that it should still work just be removed soon.\n", "before_files": [{"content": "\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nimport nox\n\ntest_dependencies = [\n \"google-auth\",\n \"google-auth-httplib2\",\n \"mox\",\n \"parameterized\",\n \"pyopenssl\",\n \"pytest\",\n \"pytest-cov\",\n \"webtest\",\n \"coverage\",\n \"unittest2\",\n \"mock\",\n]\n\n\[email protected](python=[\"3.7\"])\ndef lint(session):\n session.install(\"flake8\")\n session.run(\n \"flake8\",\n \"googleapiclient\",\n \"tests\",\n \"--count\",\n \"--select=E9,F63,F7,F82\",\n \"--show-source\",\n \"--statistics\",\n )\n\n\[email protected](python=[\"2.7\", \"3.5\", \"3.6\", \"3.7\", \"3.8\"])\[email protected](\n \"oauth2client\",\n [\n \"oauth2client<2dev\",\n \"oauth2client>=2,<=3dev\",\n \"oauth2client>=3,<=4dev\",\n \"oauth2client>=4,<=5dev\",\n ],\n)\ndef unit(session, oauth2client):\n session.install(*test_dependencies)\n session.install(oauth2client)\n if session.python < \"3.0\":\n session.install(\"django<2.0.0\")\n else:\n session.install(\"django>=2.0.0\")\n\n session.install('.')\n\n # Run py.test against the unit tests.\n session.run(\n \"py.test\",\n \"--quiet\",\n \"--cov=googleapiclient\",\n \"--cov=tests\",\n \"--cov-append\",\n \"--cov-config=.coveragerc\",\n \"--cov-report=\",\n \"--cov-fail-under=85\",\n \"tests\",\n *session.posargs,\n )\n\n\[email protected](python=\"3.6\")\ndef docs(session):\n session.install('.')\n session.run(\"python\", \"describe.py\")", "path": "noxfile.py"}, {"content": "# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script for Google API Python client.\n\nAlso installs included versions of third party libraries, if those libraries\nare not already installed.\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\n\nif sys.version_info < (2, 7):\n print(\"google-api-python-client requires python version >= 2.7.\", file=sys.stderr)\n sys.exit(1)\nif (3, 1) <= sys.version_info < (3, 4):\n print(\"google-api-python-client requires python3 version >= 3.4.\", file=sys.stderr)\n sys.exit(1)\n\nimport io\nimport os\nfrom setuptools import setup\n\npackages = [\"apiclient\", \"googleapiclient\", \"googleapiclient/discovery_cache\"]\n\ninstall_requires = [\n \"httplib2>=0.15.0,<1dev\",\n \"google-auth>=1.16.0\",\n \"google-auth-httplib2>=0.0.3\",\n \"google-api-core>=1.21.0,<2dev\",\n \"six>=1.13.0,<2dev\",\n \"uritemplate>=3.0.0,<4dev\",\n]\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.md\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = \"1.12.8\"\n\nsetup(\n name=\"google-api-python-client\",\n version=version,\n description=\"Google API Client Library for Python\",\n long_description=readme,\n long_description_content_type='text/markdown',\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n url=\"https://github.com/googleapis/google-api-python-client/\",\n install_requires=install_requires,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*\",\n packages=packages,\n package_data={},\n license=\"Apache 2.0\",\n keywords=\"google api client\",\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]} | 2,232 | 659 |
gh_patches_debug_18713 | rasdani/github-patches | git_diff | pypi__warehouse-3396 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing Purges
Noticed this while setting up a new mirror. We don't seem to be purging the `project/<normalized_name>` key when projects are deleted.
This leads bandersnatch to get confused and fall behind until the key is purged so that the JSON API returns a 404.
</issue>
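For orientation, purge keys in this code base are registered per model in `includeme` (see the snippet below). One plausible shape of a fix is to register purge keys for the role/project relationship as well, so that deleting a project also purges its `project/<normalized_name>` key. The `Role` model used here is an assumption (it is not imported in the snippet shown below):

```python
# Hypothetical registration sketch: purge the project page and the owning
# user's page whenever a Role row changes or is deleted.
config.register_origin_cache_keys(
    Role,
    purge_keys=[
        key_factory("user/{obj.user.username}"),
        key_factory("project/{obj.project.normalized_name}"),
    ],
)
```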
<code>
[start of warehouse/packaging/__init__.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from celery.schedules import crontab
14 from sqlalchemy.orm.base import NO_VALUE
15
16 from warehouse import db
17 from warehouse.accounts.models import User, Email
18 from warehouse.cache.origin import key_factory, receive_set
19 from warehouse.packaging.interfaces import IFileStorage
20 from warehouse.packaging.models import Project, Release
21 from warehouse.packaging.tasks import compute_trending
22
23
24 @db.listens_for(User.name, 'set')
25 def user_name_receive_set(config, target, value, oldvalue, initiator):
26 if oldvalue is not NO_VALUE:
27 receive_set(User.name, config, target)
28
29
30 @db.listens_for(Email.primary, 'set')
31 def email_primary_receive_set(config, target, value, oldvalue, initiator):
32 if oldvalue is not NO_VALUE:
33 receive_set(Email.primary, config, target)
34
35
36 def includeme(config):
37 # Register whatever file storage backend has been configured for storing
38 # our package files.
39 storage_class = config.maybe_dotted(
40 config.registry.settings["files.backend"],
41 )
42 config.register_service_factory(storage_class.create_service, IFileStorage)
43
44 # Register our origin cache keys
45 config.register_origin_cache_keys(
46 Project,
47 cache_keys=["project/{obj.normalized_name}"],
48 purge_keys=[
49 key_factory("project/{obj.normalized_name}"),
50 key_factory("user/{itr.username}", iterate_on='users'),
51 key_factory("all-projects"),
52 ],
53 )
54 config.register_origin_cache_keys(
55 Release,
56 cache_keys=["project/{obj.project.normalized_name}"],
57 purge_keys=[
58 key_factory("project/{obj.project.normalized_name}"),
59 key_factory("user/{itr.username}", iterate_on='project.users'),
60 key_factory("all-projects"),
61 ],
62 )
63 config.register_origin_cache_keys(
64 User,
65 cache_keys=["user/{obj.username}"],
66 )
67 config.register_origin_cache_keys(
68 User.name,
69 purge_keys=[
70 key_factory("user/{obj.username}"),
71 key_factory("project/{itr.normalized_name}", iterate_on='projects')
72 ],
73 )
74 config.register_origin_cache_keys(
75 Email.primary,
76 purge_keys=[
77 key_factory("user/{obj.user.username}"),
78 key_factory(
79 "project/{itr.normalized_name}",
80 iterate_on='user.projects',
81 )
82 ],
83 )
84
85 # Add a periodic task to compute trending once a day, assuming we have
86 # been configured to be able to access BigQuery.
87 if config.get_settings().get("warehouse.trending_table"):
88 config.add_periodic_task(crontab(minute=0, hour=3), compute_trending)
89
[end of warehouse/packaging/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/packaging/__init__.py b/warehouse/packaging/__init__.py
--- a/warehouse/packaging/__init__.py
+++ b/warehouse/packaging/__init__.py
@@ -17,7 +17,7 @@
from warehouse.accounts.models import User, Email
from warehouse.cache.origin import key_factory, receive_set
from warehouse.packaging.interfaces import IFileStorage
-from warehouse.packaging.models import Project, Release
+from warehouse.packaging.models import Project, Release, Role
from warehouse.packaging.tasks import compute_trending
@@ -60,6 +60,13 @@
key_factory("all-projects"),
],
)
+ config.register_origin_cache_keys(
+ Role,
+ purge_keys=[
+ key_factory("user/{obj.user.username}"),
+ key_factory("project/{obj.project.normalized_name}")
+ ],
+ )
config.register_origin_cache_keys(
User,
cache_keys=["user/{obj.username}"],
| {"golden_diff": "diff --git a/warehouse/packaging/__init__.py b/warehouse/packaging/__init__.py\n--- a/warehouse/packaging/__init__.py\n+++ b/warehouse/packaging/__init__.py\n@@ -17,7 +17,7 @@\n from warehouse.accounts.models import User, Email\n from warehouse.cache.origin import key_factory, receive_set\n from warehouse.packaging.interfaces import IFileStorage\n-from warehouse.packaging.models import Project, Release\n+from warehouse.packaging.models import Project, Release, Role\n from warehouse.packaging.tasks import compute_trending\n \n \n@@ -60,6 +60,13 @@\n key_factory(\"all-projects\"),\n ],\n )\n+ config.register_origin_cache_keys(\n+ Role,\n+ purge_keys=[\n+ key_factory(\"user/{obj.user.username}\"),\n+ key_factory(\"project/{obj.project.normalized_name}\")\n+ ],\n+ )\n config.register_origin_cache_keys(\n User,\n cache_keys=[\"user/{obj.username}\"],\n", "issue": "Missing Purges\nNoticed this while setting up new mirror. We don't seem to be purging `project/<normalized_name>` key when projects are deleted.\r\n\r\nThis leads bandersnatch to get confused and fall behind until the key is purged so the JSON api returns a 404\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom celery.schedules import crontab\nfrom sqlalchemy.orm.base import NO_VALUE\n\nfrom warehouse import db\nfrom warehouse.accounts.models import User, Email\nfrom warehouse.cache.origin import key_factory, receive_set\nfrom warehouse.packaging.interfaces import IFileStorage\nfrom warehouse.packaging.models import Project, Release\nfrom warehouse.packaging.tasks import compute_trending\n\n\[email protected]_for(User.name, 'set')\ndef user_name_receive_set(config, target, value, oldvalue, initiator):\n if oldvalue is not NO_VALUE:\n receive_set(User.name, config, target)\n\n\[email protected]_for(Email.primary, 'set')\ndef email_primary_receive_set(config, target, value, oldvalue, initiator):\n if oldvalue is not NO_VALUE:\n receive_set(Email.primary, config, target)\n\n\ndef includeme(config):\n # Register whatever file storage backend has been configured for storing\n # our package files.\n storage_class = config.maybe_dotted(\n config.registry.settings[\"files.backend\"],\n )\n config.register_service_factory(storage_class.create_service, IFileStorage)\n\n # Register our origin cache keys\n config.register_origin_cache_keys(\n Project,\n cache_keys=[\"project/{obj.normalized_name}\"],\n purge_keys=[\n key_factory(\"project/{obj.normalized_name}\"),\n key_factory(\"user/{itr.username}\", iterate_on='users'),\n key_factory(\"all-projects\"),\n ],\n )\n config.register_origin_cache_keys(\n Release,\n cache_keys=[\"project/{obj.project.normalized_name}\"],\n purge_keys=[\n key_factory(\"project/{obj.project.normalized_name}\"),\n key_factory(\"user/{itr.username}\", iterate_on='project.users'),\n key_factory(\"all-projects\"),\n ],\n )\n config.register_origin_cache_keys(\n User,\n cache_keys=[\"user/{obj.username}\"],\n )\n config.register_origin_cache_keys(\n User.name,\n 
purge_keys=[\n key_factory(\"user/{obj.username}\"),\n key_factory(\"project/{itr.normalized_name}\", iterate_on='projects')\n ],\n )\n config.register_origin_cache_keys(\n Email.primary,\n purge_keys=[\n key_factory(\"user/{obj.user.username}\"),\n key_factory(\n \"project/{itr.normalized_name}\",\n iterate_on='user.projects',\n )\n ],\n )\n\n # Add a periodic task to compute trending once a day, assuming we have\n # been configured to be able to access BigQuery.\n if config.get_settings().get(\"warehouse.trending_table\"):\n config.add_periodic_task(crontab(minute=0, hour=3), compute_trending)\n", "path": "warehouse/packaging/__init__.py"}]} | 1,440 | 216 |
gh_patches_debug_21636 | rasdani/github-patches | git_diff | cloudtools__troposphere-1775 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add AdvancedSecurityOptions property to AWS ElasticSearch Domain
**Description:**
AWS Elasticsearch now supports fine-grained access control with CloudFormation. AdvancedSecurityOptions and MasterUserOptions need to be added to the AWS::Elasticsearch::Domain object to enable this new functionality.
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-advancedsecurityoptions
</issue>
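As a rough sketch of what the missing properties could look like in troposphere style (property names follow the linked AWS documentation; the required/optional flags and types are assumptions, and `AWSProperty`, `boolean`, and `basestring` come from the module shown below):

```python
class MasterUserOptions(AWSProperty):
    props = {
        'MasterUserARN': (basestring, False),
        'MasterUserName': (basestring, False),
        'MasterUserPassword': (basestring, False),
    }


class AdvancedSecurityOptionsInput(AWSProperty):
    props = {
        'Enabled': (boolean, False),
        'InternalUserDatabaseEnabled': (boolean, False),
        'MasterUserOptions': (MasterUserOptions, False),
    }
```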
<code>
[start of troposphere/elasticsearch.py]
1 # Copyright (c) 2012-2015, Mark Peek <[email protected]>
2 # All rights reserved.
3 #
4 # See LICENSE file for full license.
5
6 from . import AWSProperty, AWSObject, Tags
7 from .compat import policytypes
8 from .validators import boolean, integer, integer_range, positive_integer
9
10 VALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')
11
12
13 def validate_volume_type(volume_type):
14 """Validate VolumeType for ElasticsearchDomain"""
15 if volume_type not in VALID_VOLUME_TYPES:
16 raise ValueError("Elasticsearch Domain VolumeType must be one of: %s" %
17 ", ".join(VALID_VOLUME_TYPES))
18 return volume_type
19
20
21 class CognitoOptions(AWSProperty):
22 props = {
23 'Enabled': (boolean, False),
24 'IdentityPoolId': (basestring, False),
25 'RoleArn': (basestring, False),
26 'UserPoolId': (basestring, False),
27 }
28
29
30 class EBSOptions(AWSProperty):
31 props = {
32 'EBSEnabled': (boolean, False),
33 'Iops': (positive_integer, False),
34 'VolumeSize': (integer, False),
35 'VolumeType': (validate_volume_type, False)
36 }
37
38 def validate(self):
39 volume_type = self.properties.get('VolumeType')
40 iops = self.properties.get('Iops')
41 if volume_type == 'io1' and not iops:
42 raise ValueError("Must specify Iops if VolumeType is 'io1'.")
43
44
45 class ZoneAwarenessConfig(AWSProperty):
46 props = {
47 'AvailabilityZoneCount': (integer, False),
48 }
49
50
51 class ElasticsearchClusterConfig(AWSProperty):
52 props = {
53 'DedicatedMasterCount': (integer, False),
54 'DedicatedMasterEnabled': (boolean, False),
55 'DedicatedMasterType': (basestring, False),
56 'InstanceCount': (integer, False),
57 'InstanceType': (basestring, False),
58 'ZoneAwarenessConfig': (ZoneAwarenessConfig, False),
59 'ZoneAwarenessEnabled': (boolean, False)
60 }
61
62
63 class EncryptionAtRestOptions(AWSProperty):
64 props = {
65 'Enabled': (boolean, False),
66 'KmsKeyId': (basestring, False),
67 }
68
69
70 class NodeToNodeEncryptionOptions(AWSProperty):
71 props = {
72 'Enabled': (boolean, False),
73 }
74
75
76 class SnapshotOptions(AWSProperty):
77 props = {
78 'AutomatedSnapshotStartHour': (integer_range(0, 23), False)
79 }
80
81
82 class VPCOptions(AWSProperty):
83 props = {
84 "SecurityGroupIds": ([basestring], False),
85 "SubnetIds": ([basestring], False)
86 }
87
88
89 class Domain(AWSObject):
90 resource_type = "AWS::Elasticsearch::Domain"
91
92 props = {
93 'AccessPolicies': (policytypes, False),
94 'AdvancedOptions': (dict, False),
95 'CognitoOptions': (CognitoOptions, False),
96 'DomainName': (basestring, False),
97 'EBSOptions': (EBSOptions, False),
98 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),
99 'ElasticsearchVersion': (basestring, False),
100 'EncryptionAtRestOptions': (EncryptionAtRestOptions, False),
101 'LogPublishingOptions': (dict, False),
102 'NodeToNodeEncryptionOptions': (NodeToNodeEncryptionOptions, False),
103 'SnapshotOptions': (SnapshotOptions, False),
104 'Tags': ((Tags, list), False),
105 'VPCOptions': (VPCOptions, False),
106 }
107
108
109 # Backward compatibility
110 ElasticsearchDomain = Domain
111
[end of troposphere/elasticsearch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/troposphere/elasticsearch.py b/troposphere/elasticsearch.py
--- a/troposphere/elasticsearch.py
+++ b/troposphere/elasticsearch.py
@@ -81,8 +81,24 @@
class VPCOptions(AWSProperty):
props = {
- "SecurityGroupIds": ([basestring], False),
- "SubnetIds": ([basestring], False)
+ 'SecurityGroupIds': ([basestring], False),
+ 'SubnetIds': ([basestring], False)
+ }
+
+
+class MasterUserOptions(AWSProperty):
+ props = {
+ 'MasterUserARN': (basestring, False),
+ 'MasterUserName': (basestring, False),
+ 'MasterUserPassword': (basestring, False),
+ }
+
+
+class AdvancedSecurityOptionsInput(AWSProperty):
+ props = {
+ 'Enabled': (boolean, False),
+ 'InternalUserDatabaseEnabled': (boolean, False),
+ 'MasterUserOptions': (MasterUserOptions, False),
}
@@ -92,6 +108,7 @@
props = {
'AccessPolicies': (policytypes, False),
'AdvancedOptions': (dict, False),
+ 'AdvancedSecurityOptions': (AdvancedSecurityOptionsInput, False),
'CognitoOptions': (CognitoOptions, False),
'DomainName': (basestring, False),
'EBSOptions': (EBSOptions, False),
| {"golden_diff": "diff --git a/troposphere/elasticsearch.py b/troposphere/elasticsearch.py\n--- a/troposphere/elasticsearch.py\n+++ b/troposphere/elasticsearch.py\n@@ -81,8 +81,24 @@\n \n class VPCOptions(AWSProperty):\n props = {\n- \"SecurityGroupIds\": ([basestring], False),\n- \"SubnetIds\": ([basestring], False)\n+ 'SecurityGroupIds': ([basestring], False),\n+ 'SubnetIds': ([basestring], False)\n+ }\n+\n+\n+class MasterUserOptions(AWSProperty):\n+ props = {\n+ 'MasterUserARN': (basestring, False),\n+ 'MasterUserName': (basestring, False),\n+ 'MasterUserPassword': (basestring, False),\n+ }\n+\n+\n+class AdvancedSecurityOptionsInput(AWSProperty):\n+ props = {\n+ 'Enabled': (boolean, False),\n+ 'InternalUserDatabaseEnabled': (boolean, False),\n+ 'MasterUserOptions': (MasterUserOptions, False),\n }\n \n \n@@ -92,6 +108,7 @@\n props = {\n 'AccessPolicies': (policytypes, False),\n 'AdvancedOptions': (dict, False),\n+ 'AdvancedSecurityOptions': (AdvancedSecurityOptionsInput, False),\n 'CognitoOptions': (CognitoOptions, False),\n 'DomainName': (basestring, False),\n 'EBSOptions': (EBSOptions, False),\n", "issue": "Add AdvancedSecurityOptions property to AWS ElasticSearch Domain\n**Description:**\r\n\r\nAWS Elasticsearch now supports fine-grained access control with Cloudformation. Need to add AdvancedSecurityOptions and MasterUserOptions to AWS::Elasticsearch::Domain object to enable this new functionality. \r\n\r\nhttps://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-advancedsecurityoptions\n", "before_files": [{"content": "# Copyright (c) 2012-2015, Mark Peek <[email protected]>\n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom . import AWSProperty, AWSObject, Tags\nfrom .compat import policytypes\nfrom .validators import boolean, integer, integer_range, positive_integer\n\nVALID_VOLUME_TYPES = ('standard', 'gp2', 'io1')\n\n\ndef validate_volume_type(volume_type):\n \"\"\"Validate VolumeType for ElasticsearchDomain\"\"\"\n if volume_type not in VALID_VOLUME_TYPES:\n raise ValueError(\"Elasticsearch Domain VolumeType must be one of: %s\" %\n \", \".join(VALID_VOLUME_TYPES))\n return volume_type\n\n\nclass CognitoOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'IdentityPoolId': (basestring, False),\n 'RoleArn': (basestring, False),\n 'UserPoolId': (basestring, False),\n }\n\n\nclass EBSOptions(AWSProperty):\n props = {\n 'EBSEnabled': (boolean, False),\n 'Iops': (positive_integer, False),\n 'VolumeSize': (integer, False),\n 'VolumeType': (validate_volume_type, False)\n }\n\n def validate(self):\n volume_type = self.properties.get('VolumeType')\n iops = self.properties.get('Iops')\n if volume_type == 'io1' and not iops:\n raise ValueError(\"Must specify Iops if VolumeType is 'io1'.\")\n\n\nclass ZoneAwarenessConfig(AWSProperty):\n props = {\n 'AvailabilityZoneCount': (integer, False),\n }\n\n\nclass ElasticsearchClusterConfig(AWSProperty):\n props = {\n 'DedicatedMasterCount': (integer, False),\n 'DedicatedMasterEnabled': (boolean, False),\n 'DedicatedMasterType': (basestring, False),\n 'InstanceCount': (integer, False),\n 'InstanceType': (basestring, False),\n 'ZoneAwarenessConfig': (ZoneAwarenessConfig, False),\n 'ZoneAwarenessEnabled': (boolean, False)\n }\n\n\nclass EncryptionAtRestOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, False),\n 'KmsKeyId': (basestring, False),\n }\n\n\nclass NodeToNodeEncryptionOptions(AWSProperty):\n props = {\n 'Enabled': (boolean, 
False),\n }\n\n\nclass SnapshotOptions(AWSProperty):\n props = {\n 'AutomatedSnapshotStartHour': (integer_range(0, 23), False)\n }\n\n\nclass VPCOptions(AWSProperty):\n props = {\n \"SecurityGroupIds\": ([basestring], False),\n \"SubnetIds\": ([basestring], False)\n }\n\n\nclass Domain(AWSObject):\n resource_type = \"AWS::Elasticsearch::Domain\"\n\n props = {\n 'AccessPolicies': (policytypes, False),\n 'AdvancedOptions': (dict, False),\n 'CognitoOptions': (CognitoOptions, False),\n 'DomainName': (basestring, False),\n 'EBSOptions': (EBSOptions, False),\n 'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False),\n 'ElasticsearchVersion': (basestring, False),\n 'EncryptionAtRestOptions': (EncryptionAtRestOptions, False),\n 'LogPublishingOptions': (dict, False),\n 'NodeToNodeEncryptionOptions': (NodeToNodeEncryptionOptions, False),\n 'SnapshotOptions': (SnapshotOptions, False),\n 'Tags': ((Tags, list), False),\n 'VPCOptions': (VPCOptions, False),\n }\n\n\n# Backward compatibility\nElasticsearchDomain = Domain\n", "path": "troposphere/elasticsearch.py"}]} | 1,651 | 322 |
gh_patches_debug_39486 | rasdani/github-patches | git_diff | PlasmaPy__PlasmaPy-491 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve project description on PyPI
[Project description](https://pypi.org/project/plasmapy/#description) is currently bland. We should detail some functionality, problems that the project is trying to address, mention openAstronomy affiliation, etc.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # Licensed under a 3-clause BSD style license - see LICENSE.rst
3
4 import glob
5 import os
6 import sys
7
8 # Enforce Python version check - this is the same check as in __init__.py but
9 # this one has to happen before importing ah_bootstrap.
10 if sys.version_info < tuple((int(val) for val in "3.6".split('.'))):
11 sys.stderr.write("ERROR: plasmapy requires Python {} or later\n".format(3.6))
12 sys.exit(1)
13
14 import ah_bootstrap
15 from setuptools import setup
16
17 # A dirty hack to get around some early import/configurations ambiguities
18 if sys.version_info[0] >= 3:
19 import builtins
20 else:
21 import __builtin__ as builtins
22 builtins._ASTROPY_SETUP_ = True
23
24 from astropy_helpers.setup_helpers import (register_commands, get_debug_option,
25 get_package_info)
26 from astropy_helpers.git_helpers import get_git_devstr
27 from astropy_helpers.version_helpers import generate_version_py
28
29 # Get some values from the setup.cfg
30 try:
31 from ConfigParser import ConfigParser
32 except ImportError:
33 from configparser import ConfigParser
34
35 conf = ConfigParser()
36 conf.read(['setup.cfg'])
37 metadata = dict(conf.items('metadata'))
38
39 PACKAGENAME = metadata.get('package_name', 'plasmapy')
40 DESCRIPTION = metadata.get('description', 'plasmapy')
41 AUTHOR = metadata.get('author', 'PlasmaPy Developers')
42 AUTHOR_EMAIL = metadata.get('author_email', '')
43 LICENSE = metadata.get('license', 'unknown')
44 URL = metadata.get('url', 'http://plasmapy.org')
45
46 # order of priority for long_description:
47 # (1) set in setup.cfg,
48 # (2) load LONG_DESCRIPTION.rst,
49 # (3) load README.rst,
50 # (4) package docstring
51 readme_glob = 'README*'
52 _cfg_long_description = metadata.get('long_description', '')
53 if _cfg_long_description:
54 LONG_DESCRIPTION = _cfg_long_description
55
56 elif os.path.exists('LONG_DESCRIPTION.rst'):
57 with open('LONG_DESCRIPTION.rst') as f:
58 LONG_DESCRIPTION = f.read()
59
60 elif len(glob.glob(readme_glob)) > 0:
61 with open(glob.glob(readme_glob)[0]) as f:
62 LONG_DESCRIPTION = f.read()
63
64 else:
65 # Get the long description from the package's docstring
66 __import__(PACKAGENAME)
67 package = sys.modules[PACKAGENAME]
68 LONG_DESCRIPTION = package.__doc__
69
70 # Store the package name in a built-in variable so it's easy
71 # to get from other parts of the setup infrastructure
72 builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
73
74 # VERSION should be PEP440 compatible (http://www.python.org/dev/peps/pep-0440)
75 VERSION = metadata.get('version', '0.0.dev0')
76
77 # Indicates if this version is a release version
78 RELEASE = 'dev' not in VERSION
79
80 if not RELEASE:
81 VERSION += get_git_devstr(False)
82
83 # Populate the dict of setup command overrides; this should be done before
84 # invoking any other functionality from distutils since it can potentially
85 # modify distutils' behavior.
86 cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
87
88 # Freeze build information in version.py
89 generate_version_py(PACKAGENAME, VERSION, RELEASE,
90 get_debug_option(PACKAGENAME))
91
92 # Treat everything in scripts except README* as a script to be installed
93 scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
94 if not os.path.basename(fname).startswith('README')]
95
96
97 # Get configuration information from all of the various subpackages.
98 # See the docstring for setup_helpers.update_package_files for more
99 # details.
100 package_info = get_package_info()
101
102 # Add the project-global data
103 package_info['package_data'].setdefault(PACKAGENAME, [])
104 package_info['package_data'][PACKAGENAME].append('data/*')
105
106 # Define entry points for command-line scripts
107 entry_points = {'console_scripts': []}
108
109 if conf.has_section('entry_points'):
110 entry_point_list = conf.items('entry_points')
111 for entry_point in entry_point_list:
112 entry_points['console_scripts'].append('{0} = {1}'.format(
113 entry_point[0], entry_point[1]))
114
115 # Include all .c files, recursively, including those generated by
116 # Cython, since we can not do this in MANIFEST.in with a "dynamic"
117 # directory name.
118 c_files = []
119 for root, dirs, files in os.walk(PACKAGENAME):
120 for filename in files:
121 if filename.endswith('.c'):
122 c_files.append(
123 os.path.join(
124 os.path.relpath(root, PACKAGENAME), filename))
125 package_info['package_data'][PACKAGENAME].extend(c_files)
126
127 # Note that requires and provides should not be included in the call to
128 # ``setup``, since these are now deprecated. See this link for more details:
129 # https://groups.google.com/forum/#!topic/astropy-dev/urYO8ckB2uM
130
131 setup(name=PACKAGENAME,
132 version=VERSION,
133 description=DESCRIPTION,
134 scripts=scripts,
135 setup_requires=metadata.get("setup_requires", None),
136 install_requires=[s.strip() for s in metadata.get('install_requires', 'astropy').split(',')],
137 author=AUTHOR,
138 author_email=AUTHOR_EMAIL,
139 license=LICENSE,
140 url=URL,
141 long_description=LONG_DESCRIPTION,
142 cmdclass=cmdclassd,
143 zip_safe=False,
144 use_2to3=False,
145 include_package_data=True,
146 entry_points=entry_points,
147 python_requires='>={}'.format("3.6"),
148 **package_info
149 )
150
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,6 +23,7 @@
from astropy_helpers.setup_helpers import (register_commands, get_debug_option,
get_package_info)
+from astropy_helpers.distutils_helpers import is_distutils_display_option
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
@@ -124,6 +125,18 @@
os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)
+setup_requires = ['numpy']
+
+# Make sure to have the packages needed for building PlasmaPy, but do not require them
+# when installing from an sdist as the c files are included there.
+if not os.path.exists(os.path.join(os.path.dirname(__file__), 'PKG-INFO')):
+ setup_requires.extend(['cython>=0.27.2'])
+
+# Avoid installing setup_requires dependencies if the user just
+# queries for information
+if is_distutils_display_option():
+ setup_requires = []
+
# Note that requires and provides should not be included in the call to
# ``setup``, since these are now deprecated. See this link for more details:
# https://groups.google.com/forum/#!topic/astropy-dev/urYO8ckB2uM
@@ -132,18 +145,33 @@
version=VERSION,
description=DESCRIPTION,
scripts=scripts,
- setup_requires=metadata.get("setup_requires", None),
+ setup_requires=[s.strip() for s in metadata.get('install_requires', 'astropy').split(',')],
install_requires=[s.strip() for s in metadata.get('install_requires', 'astropy').split(',')],
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION,
+ keywords=['plasma', 'physics', 'transport', 'collisions', 'science',
+ 'atomic', 'particle', 'simulation', 'langmuir', 'tokamak',
+ 'instability', 'modeling'],
+ classifiers=[
+ 'Intended Audience :: Science/Research',
+ 'License :: OSI Approved :: BSD-2-Clause-Patent',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: C',
+ 'Programming Language :: Cython',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: Implementation :: CPython',
+ 'Topic :: Scientific/Engineering :: Astronomy',
+ 'Topic :: Scientific/Engineering :: Physics'
+ ],
cmdclass=cmdclassd,
zip_safe=False,
use_2to3=False,
include_package_data=True,
entry_points=entry_points,
python_requires='>={}'.format("3.6"),
+ tests_require=["pytest", "pytest-astropy"],
**package_info
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,6 +23,7 @@\n \n from astropy_helpers.setup_helpers import (register_commands, get_debug_option,\n get_package_info)\n+from astropy_helpers.distutils_helpers import is_distutils_display_option\n from astropy_helpers.git_helpers import get_git_devstr\n from astropy_helpers.version_helpers import generate_version_py\n \n@@ -124,6 +125,18 @@\n os.path.relpath(root, PACKAGENAME), filename))\n package_info['package_data'][PACKAGENAME].extend(c_files)\n \n+setup_requires = ['numpy']\n+\n+# Make sure to have the packages needed for building PlasmaPy, but do not require them\n+# when installing from an sdist as the c files are included there.\n+if not os.path.exists(os.path.join(os.path.dirname(__file__), 'PKG-INFO')):\n+ setup_requires.extend(['cython>=0.27.2'])\n+\n+# Avoid installing setup_requires dependencies if the user just\n+# queries for information\n+if is_distutils_display_option():\n+ setup_requires = []\n+\n # Note that requires and provides should not be included in the call to\n # ``setup``, since these are now deprecated. See this link for more details:\n # https://groups.google.com/forum/#!topic/astropy-dev/urYO8ckB2uM\n@@ -132,18 +145,33 @@\n version=VERSION,\n description=DESCRIPTION,\n scripts=scripts,\n- setup_requires=metadata.get(\"setup_requires\", None),\n+ setup_requires=[s.strip() for s in metadata.get('install_requires', 'astropy').split(',')],\n install_requires=[s.strip() for s in metadata.get('install_requires', 'astropy').split(',')],\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n license=LICENSE,\n url=URL,\n long_description=LONG_DESCRIPTION,\n+ keywords=['plasma', 'physics', 'transport', 'collisions', 'science',\n+ 'atomic', 'particle', 'simulation', 'langmuir', 'tokamak',\n+ 'instability', 'modeling'],\n+ classifiers=[\n+ 'Intended Audience :: Science/Research',\n+ 'License :: OSI Approved :: BSD-2-Clause-Patent',\n+ 'Operating System :: OS Independent',\n+ 'Programming Language :: C',\n+ 'Programming Language :: Cython',\n+ 'Programming Language :: Python :: 3',\n+ 'Programming Language :: Python :: Implementation :: CPython',\n+ 'Topic :: Scientific/Engineering :: Astronomy',\n+ 'Topic :: Scientific/Engineering :: Physics'\n+ ],\n cmdclass=cmdclassd,\n zip_safe=False,\n use_2to3=False,\n include_package_data=True,\n entry_points=entry_points,\n python_requires='>={}'.format(\"3.6\"),\n+ tests_require=[\"pytest\", \"pytest-astropy\"],\n **package_info\n )\n", "issue": "Improve project description on PyPI\n[Project description](https://pypi.org/project/plasmapy/#description) is currently bland. 
We should detail some functionality, problems that the project is trying to address, mention openAstronomy affiliation, etc.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport glob\nimport os\nimport sys\n\n# Enforce Python version check - this is the same check as in __init__.py but\n# this one has to happen before importing ah_bootstrap.\nif sys.version_info < tuple((int(val) for val in \"3.6\".split('.'))):\n sys.stderr.write(\"ERROR: plasmapy requires Python {} or later\\n\".format(3.6))\n sys.exit(1)\n\nimport ah_bootstrap\nfrom setuptools import setup\n\n# A dirty hack to get around some early import/configurations ambiguities\nif sys.version_info[0] >= 3:\n import builtins\nelse:\n import __builtin__ as builtins\nbuiltins._ASTROPY_SETUP_ = True\n\nfrom astropy_helpers.setup_helpers import (register_commands, get_debug_option,\n get_package_info)\nfrom astropy_helpers.git_helpers import get_git_devstr\nfrom astropy_helpers.version_helpers import generate_version_py\n\n# Get some values from the setup.cfg\ntry:\n from ConfigParser import ConfigParser\nexcept ImportError:\n from configparser import ConfigParser\n\nconf = ConfigParser()\nconf.read(['setup.cfg'])\nmetadata = dict(conf.items('metadata'))\n\nPACKAGENAME = metadata.get('package_name', 'plasmapy')\nDESCRIPTION = metadata.get('description', 'plasmapy')\nAUTHOR = metadata.get('author', 'PlasmaPy Developers')\nAUTHOR_EMAIL = metadata.get('author_email', '')\nLICENSE = metadata.get('license', 'unknown')\nURL = metadata.get('url', 'http://plasmapy.org')\n\n# order of priority for long_description:\n# (1) set in setup.cfg,\n# (2) load LONG_DESCRIPTION.rst,\n# (3) load README.rst,\n# (4) package docstring\nreadme_glob = 'README*'\n_cfg_long_description = metadata.get('long_description', '')\nif _cfg_long_description:\n LONG_DESCRIPTION = _cfg_long_description\n\nelif os.path.exists('LONG_DESCRIPTION.rst'):\n with open('LONG_DESCRIPTION.rst') as f:\n LONG_DESCRIPTION = f.read()\n\nelif len(glob.glob(readme_glob)) > 0:\n with open(glob.glob(readme_glob)[0]) as f:\n LONG_DESCRIPTION = f.read()\n\nelse:\n # Get the long description from the package's docstring\n __import__(PACKAGENAME)\n package = sys.modules[PACKAGENAME]\n LONG_DESCRIPTION = package.__doc__\n\n# Store the package name in a built-in variable so it's easy\n# to get from other parts of the setup infrastructure\nbuiltins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME\n\n# VERSION should be PEP440 compatible (http://www.python.org/dev/peps/pep-0440)\nVERSION = metadata.get('version', '0.0.dev0')\n\n# Indicates if this version is a release version\nRELEASE = 'dev' not in VERSION\n\nif not RELEASE:\n VERSION += get_git_devstr(False)\n\n# Populate the dict of setup command overrides; this should be done before\n# invoking any other functionality from distutils since it can potentially\n# modify distutils' behavior.\ncmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)\n\n# Freeze build information in version.py\ngenerate_version_py(PACKAGENAME, VERSION, RELEASE,\n get_debug_option(PACKAGENAME))\n\n# Treat everything in scripts except README* as a script to be installed\nscripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))\n if not os.path.basename(fname).startswith('README')]\n\n\n# Get configuration information from all of the various subpackages.\n# See the docstring for setup_helpers.update_package_files for more\n# details.\npackage_info = get_package_info()\n\n# Add the 
project-global data\npackage_info['package_data'].setdefault(PACKAGENAME, [])\npackage_info['package_data'][PACKAGENAME].append('data/*')\n\n# Define entry points for command-line scripts\nentry_points = {'console_scripts': []}\n\nif conf.has_section('entry_points'):\n entry_point_list = conf.items('entry_points')\n for entry_point in entry_point_list:\n entry_points['console_scripts'].append('{0} = {1}'.format(\n entry_point[0], entry_point[1]))\n\n# Include all .c files, recursively, including those generated by\n# Cython, since we can not do this in MANIFEST.in with a \"dynamic\"\n# directory name.\nc_files = []\nfor root, dirs, files in os.walk(PACKAGENAME):\n for filename in files:\n if filename.endswith('.c'):\n c_files.append(\n os.path.join(\n os.path.relpath(root, PACKAGENAME), filename))\npackage_info['package_data'][PACKAGENAME].extend(c_files)\n\n# Note that requires and provides should not be included in the call to\n# ``setup``, since these are now deprecated. See this link for more details:\n# https://groups.google.com/forum/#!topic/astropy-dev/urYO8ckB2uM\n\nsetup(name=PACKAGENAME,\n version=VERSION,\n description=DESCRIPTION,\n scripts=scripts,\n setup_requires=metadata.get(\"setup_requires\", None),\n install_requires=[s.strip() for s in metadata.get('install_requires', 'astropy').split(',')],\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n license=LICENSE,\n url=URL,\n long_description=LONG_DESCRIPTION,\n cmdclass=cmdclassd,\n zip_safe=False,\n use_2to3=False,\n include_package_data=True,\n entry_points=entry_points,\n python_requires='>={}'.format(\"3.6\"),\n **package_info\n)\n", "path": "setup.py"}]} | 2,172 | 645 |
gh_patches_debug_10450 | rasdani/github-patches | git_diff | celery__celery-6020 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to use mysql SSL parameters in create_engine()
PR for proposed fix to this issue: https://github.com/celery/celery/pull/6020
# Checklist
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.
- [x] I have read the relevant section in the
[contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)
on reporting bugs.
- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)
for similar or identical bug reports.
- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)
for existing proposed fixes.
- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)
to find out if the bug was already fixed in the master branch.
- [x] I have included all related issues and possible duplicate issues
in this issue (If there are none, check this box anyway).
## Mandatory Debugging Information
- [x] I have included the output of ``celery -A proj report`` in the issue.
(if you are not able to do this, then at least specify the Celery
version affected).
- [x] I have verified that the issue exists against the `master` branch of Celery.
- [x] I have included the contents of ``pip freeze`` in the issue.
- [x] I have included all the versions of all the external dependencies required
to reproduce this bug.
## Optional Debugging Information
- [ ] I have tried reproducing the issue on more than one Python version
and/or implementation.
- [x] I have tried reproducing the issue on more than one message broker and/or
result backend.
- [ ] I have tried reproducing the issue on more than one version of the message
broker and/or result backend.
- [x] I have tried reproducing the issue on more than one operating system.
- [ ] I have tried reproducing the issue on more than one workers pool.
- [ ] I have tried reproducing the issue with autoscaling, retries,
ETA/Countdown & rate limits disabled.
- [ ] I have tried reproducing the issue after downgrading
and/or upgrading Celery and its dependencies.
## Related Issues and Possible Duplicates
https://github.com/celery/celery/commit/94dae1b899aae6ae2ca333773fddbc6dd603213c
This PR was made to address the following issue, which has resulted in the issue I am having now. https://github.com/celery/celery/issues/1930
#### Related Issues
https://github.com/celery/celery/issues/1930
#### Possible Duplicates
- None
## Environment & Settings
<!-- Include the contents of celery --version below -->
**Celery version**: celery>=4.0.0 (using it in Airflow)
# Steps to Reproduce
(see Minimally Reproducible Test Case for step by step commands. This contains information leading to the issue and a proposed fix)
In Airflow, you can set celery configs. I was setting up cloudsql to use a private IP instead of a proxy. Currently, we use mysql as the `results_backend`. Changing the host address from localhost to the private IP caused some errors, as expected. 
```
OperationalError: (_mysql_exceptions.OperationalError) (1045, "Access denied for user 'airflow'@'10.x.x.xxx' (using password: YES)")
```
In order to use the private IP, I need to use the SSL cert, key, and CA. I confirmed, by logging into the Airflow worker and scheduler pods, that my URL and engine arg params worked. 
```
from airflow.models import DagRun
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
e = create_engine({AIRFLOW__CELERY__SQL_ALCHEMY_CONN},connect_args= {'ssl': {'ca': '/path-to-mysql-sslcert/server-ca', 'cert': '/path-to-mysql-sslcert/client-cert', 'key': '/path-to-mysql-sslcert/client-key'}})
s = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=e))
s.query(DagRun).all()
```
This worked fine, so I know that my SSL certs are accessible, the engine can be created, and a session used. Non-celery mysql connections no longer gave an error. 
The Celery documentation (https://docs.celeryproject.org/en/stable/userguide/configuration.html#conf-database-result-backend) outlines how to add engine args via `database_engine_options`. Therefore, I added 
```
'database_engine_options': {
'connect_args': {'ssl': {'ca': '/path-to-mysql-sslcert/server-ca', 'cert': '/path-to-mysql-sslcert/client-cert', 'key': '/path-to-mysql-sslcert/client-key'}}}
```
However, I still get the same error.
```
OperationalError: (_mysql_exceptions.OperationalError) (1045, "Access denied for user 'airflow'@'10.x.x.xxx' (using password: YES)")
```
Additionally, I get logs in the scheduler like the following:
```
{{__init__.py:56}} WARNING - Failed operation _get_task_meta_for. Retrying 1 more times.
68918-Traceback (most recent call last):
68919- File "/usr/local/lib/python2.7/dist-packages/celery/backends/database/__init__.py", line 51, in _inner
68920- return fun(*args, **kwargs)
68921- File "/usr/local/lib/python2.7/dist-packages/celery/backends/database/__init__.py", line 154, in _get_task_meta_for
68922: session = self.ResultSession()
68923: File "/usr/local/lib/python2.7/dist-packages/celery/backends/database/__init__.py", line 113, in ResultSession
68924- **self.engine_options)
68925- File "/usr/local/lib/python2.7/dist-packages/celery/backends/database/session.py", line 59, in session_factory
68926- self.prepare_models(engine)
68927- File "/usr/local/lib/python2.7/dist-packages/celery/backends/database/session.py", line 54, in prepare_models
68928- ResultModelBase.metadata.create_all(engine)
```
After digging through the code with @dangermike, we noticed that `get_engine` will not use the kwargs passed to it unless it has been forked (https://github.com/celery/celery/blob/master/celery/backends/database/session.py#L34). Therefore, the SSL params will not be passed in our case. The only place that self.forked is set to True is in the after-fork cleanup. This was not always the case (https://github.com/celery/celery/commit/94dae1b899aae6ae2ca333773fddbc6dd603213c), but after an issue was made about passing pool_size (https://github.com/celery/celery/issues/1930), `**kwargs` were taken out of create_engine() entirely. 
Possibly something like the following would allow for kwargs to be passed in, while still addressing the pool params issue.
```
class SessionManager(object):
# ...
def get_engine(self, dburi, **kwargs):
if self.forked:
try:
return self._engines[dburi]
except KeyError:
engine = self._engines[dburi] = create_engine(dburi, **kwargs)
return engine
else:
kwargs = dict([(k, v) for k, v in kwargs.items() if not k.startswith('pool')])
return create_engine(dburi, poolclass=NullPool, **kwargs)
```
where `kwargs = dict([(k, v) for k, v in kwargs.items() if not k.startswith('pool')])` omits any pool args while keeping the rest.
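To make the effect of that filter concrete, here is a small illustrative snippet (not part of the original report; the option values are placeholders):
```python
# Hypothetical engine options, mirroring `database_engine_options` above.
engine_kwargs = {
    'pool_size': 5,           # would be dropped: pooling is forced to NullPool pre-fork
    'pool_recycle': 3600,     # would be dropped for the same reason
    'connect_args': {'ssl': {'ca': '/path-to-mysql-sslcert/server-ca',
                             'cert': '/path-to-mysql-sslcert/client-cert',
                             'key': '/path-to-mysql-sslcert/client-key'}},
}

# The proposed filter keeps every key that does not start with 'pool', so the
# SSL settings survive and can be forwarded via
# create_engine(dburi, poolclass=NullPool, **filtered);
# on current master they are dropped entirely in the unforked branch.
filtered = dict((k, v) for k, v in engine_kwargs.items() if not k.startswith('pool'))
assert filtered == {'connect_args': engine_kwargs['connect_args']}
```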
## Required Dependencies
<!-- Please fill the required dependencies to reproduce this issue -->
* **Minimal Python Version**: >=2.7
* **Minimal Celery Version**: >=4.0.0
* **Minimal Kombu Version**: N/A or Unknown
* **Minimal Broker Version**: N/A or Unknown
* **Minimal Result Backend Version**: N/A or Unknown
* **Minimal OS and/or Kernel Version**: N/A or Unknown
* **Minimal Broker Client Version**: N/A or Unknown
* **Minimal Result Backend Client Version**: N/A or Unknown
### Python Packages
Used Airflow
### Other Dependencies
N/A
## Minimally Reproducible Test Case
In a python shell,
get the url with a private mysql IP to make result_backend, giving something like `db+mysql://airflow:***@10.x.xx.xx/airflow`
and the celery config
```
celery_configuration =
{'broker_transport_options': {'visibility_timeout': 21600},
'result_serializer': 'pickle',
'task_acks_late': True,
'database_engine_options': { 'connect_args': {'ssl': {'ca': '/path-to-mysql-sslcert/server-ca', 'cert': '/path-to-mysql-sslcert/client-cert', 'key': '/path-to-mysql-sslcert/client-key'}}},
'task_default_queue': 'default',
'worker_concurrency': 32,
'worker_prefetch_multiplier': 1,
'event_serializer': 'json',
'accept_content': ['json', 'pickle'],
'broker_url': 'redis://{URL}/1',
'result_backend': 'db+mysql://airflow:***@10.x.xx.xx/airflow',
'task_default_exchange': 'default'}
```
the most important line here is:
` 'database_engine_options': { 'connect_args': {'ssl': {'ca': '/path-to-mysql-sslcert/server-ca', 'cert': '/path-to-mysql-sslcert/client-cert', 'key': '/path-to-mysql-sslcert/client-key'}}}`
then try to connect to the result_backend by creating the app.
```
app = Celery(celery_app_name=airflow.executors.celery_executor,
config_source=celery_configuration)
```
create a database backend
```
dbbe = database.DatabaseBackend(url={results_backend url without the 'db+' in the beginning}, engine_options=celery_configuration['database_engine_options'], app=app)
```
and you will get the error again
```
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1045, "Access denied for user 'airflow'@'10.xx.xx.xxx' (using password: YES)")
(Background on this error at: http://sqlalche.me/e/e3q8)
```
# Expected Behavior
It seems like the expected behavior here would be for the connection to be successful and use the SSL certs in the **kwargs passed into `get_engine`.
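For clarity, the call we would expect in the unforked branch looks roughly like this (illustrative sketch only, assuming a MySQL driver is installed; the URI and paths are placeholders):
```python
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool

dburi = 'mysql://airflow:password@10.0.0.1/airflow'  # placeholder
# Pooling is still overridden with NullPool before the fork, but the non-pool
# kwargs (here connect_args with the SSL settings) are forwarded to SQLAlchemy.
engine = create_engine(dburi, poolclass=NullPool,
                       connect_args={'ssl': {'ca': '/path-to-mysql-sslcert/server-ca',
                                             'cert': '/path-to-mysql-sslcert/client-cert',
                                             'key': '/path-to-mysql-sslcert/client-key'}})
```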
# Actual Behavior
Since self.forked is not True, and will not become True, create_engine is called as:
```
return create_engine(dburi, poolclass=NullPool)
```
since the SSL certs are not included, an error is returned and the connection is _not_ successful.
```
sqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1045, "Access denied for user 'airflow'@'10.xx.xx.xxx' (using password: YES)")
(Background on this error at: http://sqlalche.me/e/e3q8)
```
</issue>
<code>
[start of celery/backends/database/session.py]
1 # -*- coding: utf-8 -*-
2 """SQLAlchemy session."""
3 from __future__ import absolute_import, unicode_literals
4
5 from kombu.utils.compat import register_after_fork
6 from sqlalchemy import create_engine
7 from sqlalchemy.ext.declarative import declarative_base
8 from sqlalchemy.orm import sessionmaker
9 from sqlalchemy.pool import NullPool
10
11 ResultModelBase = declarative_base()
12
13 __all__ = ('SessionManager',)
14
15
16 def _after_fork_cleanup_session(session):
17 session._after_fork()
18
19
20 class SessionManager(object):
21 """Manage SQLAlchemy sessions."""
22
23 def __init__(self):
24 self._engines = {}
25 self._sessions = {}
26 self.forked = False
27 self.prepared = False
28 if register_after_fork is not None:
29 register_after_fork(self, _after_fork_cleanup_session)
30
31 def _after_fork(self):
32 self.forked = True
33
34 def get_engine(self, dburi, **kwargs):
35 if self.forked:
36 try:
37 return self._engines[dburi]
38 except KeyError:
39 engine = self._engines[dburi] = create_engine(dburi, **kwargs)
40 return engine
41 else:
42 return create_engine(dburi, poolclass=NullPool)
43
44 def create_session(self, dburi, short_lived_sessions=False, **kwargs):
45 engine = self.get_engine(dburi, **kwargs)
46 if self.forked:
47 if short_lived_sessions or dburi not in self._sessions:
48 self._sessions[dburi] = sessionmaker(bind=engine)
49 return engine, self._sessions[dburi]
50 return engine, sessionmaker(bind=engine)
51
52 def prepare_models(self, engine):
53 if not self.prepared:
54 ResultModelBase.metadata.create_all(engine)
55 self.prepared = True
56
57 def session_factory(self, dburi, **kwargs):
58 engine, session = self.create_session(dburi, **kwargs)
59 self.prepare_models(engine)
60 return session()
61
[end of celery/backends/database/session.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/celery/backends/database/session.py b/celery/backends/database/session.py
--- a/celery/backends/database/session.py
+++ b/celery/backends/database/session.py
@@ -39,7 +39,9 @@
engine = self._engines[dburi] = create_engine(dburi, **kwargs)
return engine
else:
- return create_engine(dburi, poolclass=NullPool)
+ kwargs = dict([(k, v) for k, v in kwargs.items() if
+ not k.startswith('pool')])
+ return create_engine(dburi, poolclass=NullPool, **kwargs)
def create_session(self, dburi, short_lived_sessions=False, **kwargs):
engine = self.get_engine(dburi, **kwargs)
| {"golden_diff": "diff --git a/celery/backends/database/session.py b/celery/backends/database/session.py\n--- a/celery/backends/database/session.py\n+++ b/celery/backends/database/session.py\n@@ -39,7 +39,9 @@\n engine = self._engines[dburi] = create_engine(dburi, **kwargs)\n return engine\n else:\n- return create_engine(dburi, poolclass=NullPool)\n+ kwargs = dict([(k, v) for k, v in kwargs.items() if\n+ not k.startswith('pool')])\n+ return create_engine(dburi, poolclass=NullPool, **kwargs)\n \n def create_session(self, dburi, short_lived_sessions=False, **kwargs):\n engine = self.get_engine(dburi, **kwargs)\n", "issue": "Unable to use mysql SSL parameters in create_engine()\nPR for proposed fix to this issue: https://github.com/celery/celery/pull/6020\r\n\r\n# Checklist\r\n- [x] I have verified that the issue exists against the `master` branch of Celery.\r\n- [ ] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.\r\n- [x] I have read the relevant section in the\r\n [contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)\r\n on reporting bugs.\r\n- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)\r\n for similar or identical bug reports.\r\n- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)\r\n for existing proposed fixes.\r\n- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)\r\n to find out if the bug was already fixed in the master branch.\r\n- [x] I have included all related issues and possible duplicate issues\r\n in this issue (If there are none, check this box anyway).\r\n\r\n## Mandatory Debugging Information\r\n\r\n- [x] I have included the output of ``celery -A proj report`` in the issue.\r\n (if you are not able to do this, then at least specify the Celery\r\n version affected).\r\n- [x] I have verified that the issue exists against the `master` branch of Celery.\r\n- [x] I have included the contents of ``pip freeze`` in the issue.\r\n- [x] I have included all the versions of all the external dependencies required\r\n to reproduce this bug.\r\n\r\n## Optional Debugging Information\r\n- [ ] I have tried reproducing the issue on more than one Python version\r\n and/or implementation.\r\n- [x] I have tried reproducing the issue on more than one message broker and/or\r\n result backend.\r\n- [ ] I have tried reproducing the issue on more than one version of the message\r\n broker and/or result backend.\r\n- [x] I have tried reproducing the issue on more than one operating system.\r\n- [ ] I have tried reproducing the issue on more than one workers pool.\r\n- [ ] I have tried reproducing the issue with autoscaling, retries,\r\n ETA/Countdown & rate limits disabled.\r\n- [ ] I have tried reproducing the issue after downgrading\r\n and/or upgrading Celery and its dependencies.\r\n\r\n## Related Issues and Possible Duplicates\r\nhttps://github.com/celery/celery/commit/94dae1b899aae6ae2ca333773fddbc6dd603213c \r\nThis PR was made to address the following issue, which has resulted in the issue I am having now. 
https://github.com/celery/celery/issues/1930 \r\n\r\n#### Related Issues\r\n\r\nhttps://github.com/celery/celery/issues/1930\r\n\r\n#### Possible Duplicates\r\n\r\n- None\r\n\r\n## Environment & Settings\r\n<!-- Include the contents of celery --version below -->\r\n**Celery version**: celery>=4.0.0 (using it in Airflow) \r\n\r\n</p>\r\n</details>\r\n\r\n# Steps to Reproduce\r\n(see Minimally Reproducible Test Case for step by step commands. This contains information leading to the issue and a proposed fix)\r\n\r\nIn Airflow, you can set celery configs. I was setting up cloudsql to use a private IP instead of a proxy. Currently, we use mysql as the `results_backend`. Changing the host address from local host to the private IP caused some errors, as expected. \r\n\r\n```\r\nOperationalError: (_mysql_exceptions.OperationalError) (1045, \"Access denied for user 'airflow'@'10.x.x.xxx' (using password: YES)\")\r\n```\r\nIn order to use the private IP, I need to use the SSL cert, key, and ca. I confirmed that by logging into the Airflow worker and scheduler pods that my url and engine arg params worked. \r\n\r\n```\r\nfrom airflow.models import DagRun \r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.orm import scoped_session, sessionmaker\r\ne = create_engine({AIRFLOW__CELERY__SQL_ALCHEMY_CONN},connect_args= {'ssl': {'ca': '/path-to-mysql-sslcert/server-ca', 'cert': '/path-to-mysql-sslcert/client-cert', 'key': '/path-to-mysql-sslcert/client-key'}})\r\ns = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=e))\r\ns.query(DagRun).all()\r\n```\r\n\r\nThis worked fine, so I know that the my ssl certs are accessible, the engine can be created, and a session used. Non-celery mysql connections no longer gave an error. \r\n\r\nThe Celery documentation (https://docs.celeryproject.org/en/stable/userguide/configuration.html#conf-database-result-backend) outlines how to add engine args to via `database_engine_options`. Therefore, I added \r\n```\r\n'database_engine_options': {\r\n 'connect_args': {'ssl': {'ca': '/path-to-mysql-sslcert/server-ca', 'cert': '/path-to-mysql-sslcert/client-cert', 'key': '/path-to-mysql-sslcert/client-key'}}}\r\n```\r\nHowever, I still get the same error. \r\n\r\n```\r\nOperationalError: (_mysql_exceptions.OperationalError) (1045, \"Access denied for user 'airflow'@'10.x.x.xxx' (using password: YES)\")\r\n```\r\n\r\n\r\nAdditionally, I get logs in the scheduler like the following:\r\n```\r\n {{__init__.py:56}} WARNING - Failed operation _get_task_meta_for. 
Retrying 1 more times.\r\n68918-Traceback (most recent call last):\r\n68919- File \"/usr/local/lib/python2.7/dist-packages/celery/backends/database/__init__.py\", line 51, in _inner\r\n68920- return fun(*args, **kwargs)\r\n68921- File \"/usr/local/lib/python2.7/dist-packages/celery/backends/database/__init__.py\", line 154, in _get_task_meta_for\r\n68922: session = self.ResultSession()\r\n68923: File \"/usr/local/lib/python2.7/dist-packages/celery/backends/database/__init__.py\", line 113, in ResultSession\r\n68924- **self.engine_options)\r\n68925- File \"/usr/local/lib/python2.7/dist-packages/celery/backends/database/session.py\", line 59, in session_factory\r\n68926- self.prepare_models(engine)\r\n68927- File \"/usr/local/lib/python2.7/dist-packages/celery/backends/database/session.py\", line 54, in prepare_models\r\n68928- ResultModelBase.metadata.create_all(engine)\r\n```\r\nAfter digging through the code with @dangermike, we noticed that `get_engine` will not use the kwargs passed to it unless it has been forked.(https://github.com/celery/celery/blob/master/celery/backends/database/session.py#L34) Therefore, the SSL params will not be passed in our case. The only place that self.forked = True is after the fork cleanup session. This used to not be the case (https://github.com/celery/celery/commit/94dae1b899aae6ae2ca333773fddbc6dd603213c), but after an issue was made about passing pool_size (https://github.com/celery/celery/issues/1930), `**kwargs` were taken out of create_engine() entirely. \r\nPossibly something like the following would allow for kwargs to be passed in, while still addressing the pool params issue.\r\n\r\n```\r\nclass SessionManager(object):\r\n # ...\r\n def get_engine(self, dburi, **kwargs):\r\n if self.forked:\r\n try:\r\n return self._engines[dburi]\r\n except KeyError:\r\n engine = self._engines[dburi] = create_engine(dburi, **kwargs)\r\n return engine\r\n else:\r\n kwargs = dict([(k, v) for k, v in kwargs.items() if not k.startswith('pool')])\r\n return create_engine(dburi, poolclass=NullPool, **kwargs)\r\n```\r\n\r\nwhere `kwargs = dict([(k, v) for k, v in kwargs.items() if not k.startswith('pool')])` omits any pool args while keeping the rest. 
\r\n\r\n\r\n## Required Dependencies\r\n<!-- Please fill the required dependencies to reproduce this issue -->\r\n* **Minimal Python Version**: >=2.7\r\n* **Minimal Celery Version**: >=4.0.0\r\n* **Minimal Kombu Version**: N/A or Unknown\r\n* **Minimal Broker Version**: N/A or Unknown\r\n* **Minimal Result Backend Version**: N/A or Unknown\r\n* **Minimal OS and/or Kernel Version**: N/A or Unknown\r\n* **Minimal Broker Client Version**: N/A or Unknown\r\n* **Minimal Result Backend Client Version**: N/A or Unknown\r\n\r\n### Python Packages\r\nUsed Airflow\r\n\r\n### Other Dependencies\r\nN/A\r\n\r\n## Minimally Reproducible Test Case\r\n\r\nIn a python shell, \r\n\r\nget the url with a private mysql IP to make result_backend, giving something like `db+mysql://airflow:***@10.x.xx.xx/airflow`\r\n\r\nand the celery config\r\n```\r\ncelery_configuration = \r\n{'broker_transport_options': {'visibility_timeout': 21600},\r\n 'result_serializer': 'pickle',\r\n 'task_acks_late': True, \r\n'database_engine_options': { 'connect_args': {'ssl': {'ca': '/path-to-mysql-sslcert/server-ca', 'cert': '/path-to-mysql-sslcert/client-cert', 'key': '/path-to-mysql-sslcert/client-key'}}}, \r\n'task_default_queue': 'default',\r\n 'worker_concurrency': 32,\r\n 'worker_prefetch_multiplier': 1, \r\n'event_serializer': 'json', \r\n'accept_content': ['json', 'pickle'], \r\n'broker_url': 'redis://{URL}/1', \r\n'result_backend': 'db+mysql://airflow:***@10.x.xx.xx/airflow', \r\n'task_default_exchange': 'default'}\r\n```\r\nthe line most important here is:\r\n` 'database_engine_options': { 'connect_args': {'ssl': {'ca': '/path-to-mysql-sslcert/server-ca', 'cert': '/path-to-mysql-sslcert/client-cert', 'key': '/path-to-mysql-sslcert/client-key'}}}`\r\n\r\nthen try to connect to result_backend by creating app.\r\n```\r\napp = Celery(celery_app_name=airflow.executors.celery_executor,\r\n config_source=celery_configuration)\r\n```\r\n\r\ncreate a database backend \r\n```\r\ndbbe = database.DatabaseBackend(url={results_backend url without the 'db+' in the beginning}, engine_options=celery_configuration['database_engine_options'], app=app)\r\n```\r\n\r\n\r\nand you will get the error again \r\n\r\n```\r\nsqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1045, \"Access denied for user 'airflow'@'10.xx.xx.xxx' (using password: YES)\")\r\n(Background on this error at: http://sqlalche.me/e/e3q8)\r\n```\r\n\r\n\r\n\r\n# Expected Behavior\r\nIt seems like the expected behavior here would be for the connection to be successful and use the SSL certs in the **kwargs passed into `get_engine`. \r\n\r\n# Actual Behavior\r\nSince self.fork is not True, and will not be True, create_engine is made by:\r\n```\r\n return create_engine(dburi, poolclass=NullPool)\r\n```\r\nsince the SSL certs are not included, an error is returned and the connection is _not_ successful. 
\r\n\r\n```\r\nsqlalchemy.exc.OperationalError: (_mysql_exceptions.OperationalError) (1045, \"Access denied for user 'airflow'@'10.xx.xx.xxx' (using password: YES)\")\r\n(Background on this error at: http://sqlalche.me/e/e3q8)\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"SQLAlchemy session.\"\"\"\nfrom __future__ import absolute_import, unicode_literals\n\nfrom kombu.utils.compat import register_after_fork\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.pool import NullPool\n\nResultModelBase = declarative_base()\n\n__all__ = ('SessionManager',)\n\n\ndef _after_fork_cleanup_session(session):\n session._after_fork()\n\n\nclass SessionManager(object):\n \"\"\"Manage SQLAlchemy sessions.\"\"\"\n\n def __init__(self):\n self._engines = {}\n self._sessions = {}\n self.forked = False\n self.prepared = False\n if register_after_fork is not None:\n register_after_fork(self, _after_fork_cleanup_session)\n\n def _after_fork(self):\n self.forked = True\n\n def get_engine(self, dburi, **kwargs):\n if self.forked:\n try:\n return self._engines[dburi]\n except KeyError:\n engine = self._engines[dburi] = create_engine(dburi, **kwargs)\n return engine\n else:\n return create_engine(dburi, poolclass=NullPool)\n\n def create_session(self, dburi, short_lived_sessions=False, **kwargs):\n engine = self.get_engine(dburi, **kwargs)\n if self.forked:\n if short_lived_sessions or dburi not in self._sessions:\n self._sessions[dburi] = sessionmaker(bind=engine)\n return engine, self._sessions[dburi]\n return engine, sessionmaker(bind=engine)\n\n def prepare_models(self, engine):\n if not self.prepared:\n ResultModelBase.metadata.create_all(engine)\n self.prepared = True\n\n def session_factory(self, dburi, **kwargs):\n engine, session = self.create_session(dburi, **kwargs)\n self.prepare_models(engine)\n return session()\n", "path": "celery/backends/database/session.py"}]} | 3,827 | 173 |
gh_patches_debug_44511 | rasdani/github-patches | git_diff | conda-forge__conda-smithy-743 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'module' object has no attribute 'meta_of_feedstock'
https://travis-ci.org/conda-forge/staged-recipes/builds/367829917#L880
</issue>
<code>
[start of conda_smithy/github.py]
1 from __future__ import absolute_import, print_function
2
3 import os
4 import random
5 from random import choice
6
7 import git
8 from git import Repo
9
10 import github
11 from github import Github
12 from github.GithubException import GithubException
13 from github.Organization import Organization
14 from github.Team import Team
15
16 from . import configure_feedstock
17
18
19 def gh_token():
20 try:
21 with open(os.path.expanduser('~/.conda-smithy/github.token'), 'r') as fh:
22 token = fh.read().strip()
23 except IOError:
24 msg = ('No github token. Go to https://github.com/settings/tokens/new and generate\n'
25 'a token with repo access. Put it in ~/.conda-smithy/github.token')
26 raise RuntimeError(msg)
27 return token
28
29
30 def create_team(org, name, description, repo_names=[]):
31 # PyGithub creates secret teams, and has no way of turning that off! :(
32 post_parameters = {
33 "name": name,
34 "description": description,
35 "privacy": "closed",
36 "permission": "push",
37 "repo_names": repo_names
38 }
39 headers, data = org._requester.requestJsonAndCheck(
40 "POST",
41 org.url + "/teams",
42 input=post_parameters
43 )
44 return Team(org._requester, headers, data, completed=True)
45
46
47 def add_membership(team, member):
48 headers, data = team._requester.requestJsonAndCheck(
49 "PUT",
50 team.url + "/memberships/" + member
51 )
52 return (headers, data)
53
54
55 def remove_membership(team, member):
56 headers, data = team._requester.requestJsonAndCheck(
57 "DELETE",
58 team.url + "/memberships/" + member
59 )
60 return (headers, data)
61
62
63 def has_in_members(team, member):
64 status, headers, data = team._requester.requestJson(
65 "GET",
66 team.url + "/members/" + member
67 )
68 return status == 204
69
70
71 def get_cached_team(org, team_name, description=""):
72 cached_file = os.path.expanduser('~/.conda-smithy/{}-{}-team'.format(org.login, team_name))
73 try:
74 with open(cached_file, 'r') as fh:
75 team_id = int(fh.read().strip())
76 return org.get_team(team_id)
77 except IOError:
78 pass
79
80 team = next((team for team in org.get_teams() if team.name == team_name), None)
81 if not team:
82 team = create_team(org, team_name, description, [])
83
84 with open(cached_file, 'w') as fh:
85 fh.write(str(team.id))
86
87 return team
88
89
90 def create_github_repo(args):
91 token = gh_token()
92 meta = configure_feedstock.meta_of_feedstock(args.feedstock_directory)
93
94 gh = Github(token)
95 user_or_org = None
96 if args.user is not None:
97 pass
98 # User has been defined, and organization has not.
99 user_or_org = gh.get_user()
100 else:
101 # Use the organization provided.
102 user_or_org = gh.get_organization(args.organization)
103
104 repo_name = '{}-feedstock'.format(meta.name())
105 try:
106 gh_repo = user_or_org.create_repo(repo_name, has_wiki=False,
107 description='A conda-smithy repository for {}.'.format(meta.name()))
108 print('Created {} on github'.format(gh_repo.full_name))
109 except GithubException as gh_except:
110 if gh_except.data.get('errors', [{}])[0].get('message', '') != u'name already exists on this account':
111 raise
112 gh_repo = user_or_org.get_repo(repo_name)
113 print('Github repository already exists.')
114
115 # Now add this new repo as a remote on the local clone.
116 repo = Repo(args.feedstock_directory)
117 remote_name = args.remote_name.strip()
118 if remote_name:
119 if remote_name in [remote.name for remote in repo.remotes]:
120 existing_remote = repo.remotes[remote_name]
121 if existing_remote.url != gh_repo.ssh_url:
122 print("Remote {} already exists, and doesn't point to {} "
123 "(it points to {}).".format(remote_name, gh_repo.ssh_url, existing_remote.url))
124 else:
125 repo.create_remote(remote_name, gh_repo.ssh_url)
126
127 if args.add_teams:
128 if isinstance(user_or_org, Organization):
129 configure_github_team(meta, gh_repo, user_or_org)
130
131
132 def configure_github_team(meta, gh_repo, org):
133
134 # Add a team for this repo and add the maintainers to it.
135 superlative = [
136 'awesome', 'slick', 'formidable', 'awe-inspiring',
137 'breathtaking', 'magnificent', 'wonderous', 'stunning',
138 'astonishing', 'superb', 'splendid', 'impressive',
139 'unbeatable', 'excellent', 'top', 'outstanding', 'exalted',
140 'standout', 'smashing'
141 ]
142
143 maintainers = set(
144 meta.meta.get('extra', {}).get('recipe-maintainers', [])
145 )
146 maintainers = set(maintainer.lower() for maintainer in maintainers)
147 team_name = meta.name()
148 # Try to get team or create it if it doesn't exist.
149 team = next((team for team in gh_repo.get_teams() if team.name == team_name), None)
150 current_maintainers = []
151 if not team:
152 team = create_team(
153 org,
154 team_name,
155 'The {} {} contributors!'.format(
156 choice(superlative), team_name
157 )
158 )
159 team.add_to_repos(gh_repo)
160 else:
161 current_maintainers = team.get_members()
162
163
164 # Add only the new maintainers to the team.
165 current_maintainers_handles = set([
166 e.login.lower() for e in current_maintainers
167 ])
168 for new_maintainer in maintainers - current_maintainers_handles:
169 add_membership(team, new_maintainer)
170
171 # Mention any maintainers that need to be removed (unlikely here).
172 for old_maintainer in current_maintainers_handles - maintainers:
173 print(
174 "AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM {}".format(
175 old_maintainer, repo_name
176 )
177 )
178
179 # Get the all-members team
180 team_name = 'all-members'
181 description = "All of the awesome {} contributors!".format(org.name)
182 all_members_team = get_cached_team(org, team_name, description)
183 new_conda_forge_members = set()
184
185 # Add new members to all-members
186 for new_member in maintainers - current_maintainers_handles:
187 if not has_in_members(all_members_team, new_member):
188 print(
189 "Adding a new member ({}) to {}. Welcome! :)".format(
190 new_member, org.name
191 )
192 )
193 add_membership(all_members_team, new_member)
194 new_conda_forge_members.add(new_member)
195
196 return maintainers, current_maintainers_handles, new_conda_forge_members
197
[end of conda_smithy/github.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_smithy/github.py b/conda_smithy/github.py
--- a/conda_smithy/github.py
+++ b/conda_smithy/github.py
@@ -1,19 +1,16 @@
from __future__ import absolute_import, print_function
import os
-import random
from random import choice
-import git
from git import Repo
-import github
from github import Github
from github.GithubException import GithubException
from github.Organization import Organization
from github.Team import Team
-from . import configure_feedstock
+import conda_build.api
def gh_token():
@@ -89,7 +86,14 @@
def create_github_repo(args):
token = gh_token()
- meta = configure_feedstock.meta_of_feedstock(args.feedstock_directory)
+ meta = conda_build.api.render(args.feedstock_directory,
+ permit_undefined_jinja=True, finalize=False,
+ bypass_env_check=True, trim_skip=False)[0][0]
+
+ if "parent_recipe" in meta.meta["extra"]:
+ feedstock_name = meta.meta["extra"]["parent_recipe"]["name"]
+ else:
+ feedstock_name = meta.name()
gh = Github(token)
user_or_org = None
@@ -101,10 +105,11 @@
# Use the organization provided.
user_or_org = gh.get_organization(args.organization)
- repo_name = '{}-feedstock'.format(meta.name())
+ repo_name = '{}-feedstock'.format(feedstock_name)
try:
- gh_repo = user_or_org.create_repo(repo_name, has_wiki=False,
- description='A conda-smithy repository for {}.'.format(meta.name()))
+ gh_repo = user_or_org.create_repo(
+ repo_name, has_wiki=False,
+ description='A conda-smithy repository for {}.'.format(feedstock_name))
print('Created {} on github'.format(gh_repo.full_name))
except GithubException as gh_except:
if gh_except.data.get('errors', [{}])[0].get('message', '') != u'name already exists on this account':
@@ -120,16 +125,17 @@
existing_remote = repo.remotes[remote_name]
if existing_remote.url != gh_repo.ssh_url:
print("Remote {} already exists, and doesn't point to {} "
- "(it points to {}).".format(remote_name, gh_repo.ssh_url, existing_remote.url))
+ "(it points to {}).".format(remote_name, gh_repo.ssh_url,
+ existing_remote.url))
else:
repo.create_remote(remote_name, gh_repo.ssh_url)
if args.add_teams:
if isinstance(user_or_org, Organization):
- configure_github_team(meta, gh_repo, user_or_org)
+ configure_github_team(meta, gh_repo, user_or_org, feedstock_name)
-def configure_github_team(meta, gh_repo, org):
+def configure_github_team(meta, gh_repo, org, feedstock_name):
# Add a team for this repo and add the maintainers to it.
superlative = [
@@ -144,7 +150,7 @@
meta.meta.get('extra', {}).get('recipe-maintainers', [])
)
maintainers = set(maintainer.lower() for maintainer in maintainers)
- team_name = meta.name()
+ team_name = feedstock_name
# Try to get team or create it if it doesn't exist.
team = next((team for team in gh_repo.get_teams() if team.name == team_name), None)
current_maintainers = []
@@ -160,7 +166,6 @@
else:
current_maintainers = team.get_members()
-
# Add only the new maintainers to the team.
current_maintainers_handles = set([
e.login.lower() for e in current_maintainers
@@ -172,7 +177,7 @@
for old_maintainer in current_maintainers_handles - maintainers:
print(
"AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM {}".format(
- old_maintainer, repo_name
+ old_maintainer, gh_repo
)
)
| {"golden_diff": "diff --git a/conda_smithy/github.py b/conda_smithy/github.py\n--- a/conda_smithy/github.py\n+++ b/conda_smithy/github.py\n@@ -1,19 +1,16 @@\n from __future__ import absolute_import, print_function\n \n import os\n-import random\n from random import choice\n \n-import git\n from git import Repo\n \n-import github\n from github import Github\n from github.GithubException import GithubException\n from github.Organization import Organization\n from github.Team import Team\n \n-from . import configure_feedstock\n+import conda_build.api\n \n \n def gh_token():\n@@ -89,7 +86,14 @@\n \n def create_github_repo(args):\n token = gh_token()\n- meta = configure_feedstock.meta_of_feedstock(args.feedstock_directory)\n+ meta = conda_build.api.render(args.feedstock_directory,\n+ permit_undefined_jinja=True, finalize=False,\n+ bypass_env_check=True, trim_skip=False)[0][0]\n+\n+ if \"parent_recipe\" in meta.meta[\"extra\"]:\n+ feedstock_name = meta.meta[\"extra\"][\"parent_recipe\"][\"name\"]\n+ else:\n+ feedstock_name = meta.name()\n \n gh = Github(token)\n user_or_org = None\n@@ -101,10 +105,11 @@\n # Use the organization provided.\n user_or_org = gh.get_organization(args.organization)\n \n- repo_name = '{}-feedstock'.format(meta.name())\n+ repo_name = '{}-feedstock'.format(feedstock_name)\n try:\n- gh_repo = user_or_org.create_repo(repo_name, has_wiki=False,\n- description='A conda-smithy repository for {}.'.format(meta.name()))\n+ gh_repo = user_or_org.create_repo(\n+ repo_name, has_wiki=False,\n+ description='A conda-smithy repository for {}.'.format(feedstock_name))\n print('Created {} on github'.format(gh_repo.full_name))\n except GithubException as gh_except:\n if gh_except.data.get('errors', [{}])[0].get('message', '') != u'name already exists on this account':\n@@ -120,16 +125,17 @@\n existing_remote = repo.remotes[remote_name]\n if existing_remote.url != gh_repo.ssh_url:\n print(\"Remote {} already exists, and doesn't point to {} \"\n- \"(it points to {}).\".format(remote_name, gh_repo.ssh_url, existing_remote.url))\n+ \"(it points to {}).\".format(remote_name, gh_repo.ssh_url,\n+ existing_remote.url))\n else:\n repo.create_remote(remote_name, gh_repo.ssh_url)\n \n if args.add_teams:\n if isinstance(user_or_org, Organization):\n- configure_github_team(meta, gh_repo, user_or_org)\n+ configure_github_team(meta, gh_repo, user_or_org, feedstock_name)\n \n \n-def configure_github_team(meta, gh_repo, org):\n+def configure_github_team(meta, gh_repo, org, feedstock_name):\n \n # Add a team for this repo and add the maintainers to it.\n superlative = [\n@@ -144,7 +150,7 @@\n meta.meta.get('extra', {}).get('recipe-maintainers', [])\n )\n maintainers = set(maintainer.lower() for maintainer in maintainers)\n- team_name = meta.name()\n+ team_name = feedstock_name\n # Try to get team or create it if it doesn't exist.\n team = next((team for team in gh_repo.get_teams() if team.name == team_name), None)\n current_maintainers = []\n@@ -160,7 +166,6 @@\n else:\n current_maintainers = team.get_members()\n \n-\n # Add only the new maintainers to the team.\n current_maintainers_handles = set([\n e.login.lower() for e in current_maintainers\n@@ -172,7 +177,7 @@\n for old_maintainer in current_maintainers_handles - maintainers:\n print(\n \"AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM {}\".format(\n- old_maintainer, repo_name\n+ old_maintainer, gh_repo\n )\n )\n", "issue": "AttributeError: 'module' object has no attribute 
'meta_of_feedstock'\nhttps://travis-ci.org/conda-forge/staged-recipes/builds/367829917#L880\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\n\nimport os\nimport random\nfrom random import choice\n\nimport git\nfrom git import Repo\n\nimport github\nfrom github import Github\nfrom github.GithubException import GithubException\nfrom github.Organization import Organization\nfrom github.Team import Team\n\nfrom . import configure_feedstock\n\n\ndef gh_token():\n try:\n with open(os.path.expanduser('~/.conda-smithy/github.token'), 'r') as fh:\n token = fh.read().strip()\n except IOError:\n msg = ('No github token. Go to https://github.com/settings/tokens/new and generate\\n'\n 'a token with repo access. Put it in ~/.conda-smithy/github.token')\n raise RuntimeError(msg)\n return token\n\n\ndef create_team(org, name, description, repo_names=[]):\n # PyGithub creates secret teams, and has no way of turning that off! :(\n post_parameters = {\n \"name\": name,\n \"description\": description,\n \"privacy\": \"closed\",\n \"permission\": \"push\",\n \"repo_names\": repo_names\n }\n headers, data = org._requester.requestJsonAndCheck(\n \"POST\",\n org.url + \"/teams\",\n input=post_parameters\n )\n return Team(org._requester, headers, data, completed=True)\n\n\ndef add_membership(team, member):\n headers, data = team._requester.requestJsonAndCheck(\n \"PUT\",\n team.url + \"/memberships/\" + member\n )\n return (headers, data)\n\n\ndef remove_membership(team, member):\n headers, data = team._requester.requestJsonAndCheck(\n \"DELETE\",\n team.url + \"/memberships/\" + member\n )\n return (headers, data)\n\n\ndef has_in_members(team, member):\n status, headers, data = team._requester.requestJson(\n \"GET\",\n team.url + \"/members/\" + member\n )\n return status == 204\n\n\ndef get_cached_team(org, team_name, description=\"\"):\n cached_file = os.path.expanduser('~/.conda-smithy/{}-{}-team'.format(org.login, team_name))\n try:\n with open(cached_file, 'r') as fh:\n team_id = int(fh.read().strip())\n return org.get_team(team_id)\n except IOError:\n pass\n\n team = next((team for team in org.get_teams() if team.name == team_name), None)\n if not team:\n team = create_team(org, team_name, description, [])\n\n with open(cached_file, 'w') as fh:\n fh.write(str(team.id))\n\n return team\n\n\ndef create_github_repo(args):\n token = gh_token()\n meta = configure_feedstock.meta_of_feedstock(args.feedstock_directory)\n\n gh = Github(token)\n user_or_org = None\n if args.user is not None:\n pass\n # User has been defined, and organization has not.\n user_or_org = gh.get_user()\n else:\n # Use the organization provided.\n user_or_org = gh.get_organization(args.organization)\n\n repo_name = '{}-feedstock'.format(meta.name())\n try:\n gh_repo = user_or_org.create_repo(repo_name, has_wiki=False,\n description='A conda-smithy repository for {}.'.format(meta.name()))\n print('Created {} on github'.format(gh_repo.full_name))\n except GithubException as gh_except:\n if gh_except.data.get('errors', [{}])[0].get('message', '') != u'name already exists on this account':\n raise\n gh_repo = user_or_org.get_repo(repo_name)\n print('Github repository already exists.')\n\n # Now add this new repo as a remote on the local clone.\n repo = Repo(args.feedstock_directory)\n remote_name = args.remote_name.strip()\n if remote_name:\n if remote_name in [remote.name for remote in repo.remotes]:\n existing_remote = repo.remotes[remote_name]\n if existing_remote.url != gh_repo.ssh_url:\n 
print(\"Remote {} already exists, and doesn't point to {} \"\n \"(it points to {}).\".format(remote_name, gh_repo.ssh_url, existing_remote.url))\n else:\n repo.create_remote(remote_name, gh_repo.ssh_url)\n\n if args.add_teams:\n if isinstance(user_or_org, Organization):\n configure_github_team(meta, gh_repo, user_or_org)\n\n\ndef configure_github_team(meta, gh_repo, org):\n\n # Add a team for this repo and add the maintainers to it.\n superlative = [\n 'awesome', 'slick', 'formidable', 'awe-inspiring',\n 'breathtaking', 'magnificent', 'wonderous', 'stunning',\n 'astonishing', 'superb', 'splendid', 'impressive',\n 'unbeatable', 'excellent', 'top', 'outstanding', 'exalted',\n 'standout', 'smashing'\n ]\n\n maintainers = set(\n meta.meta.get('extra', {}).get('recipe-maintainers', [])\n )\n maintainers = set(maintainer.lower() for maintainer in maintainers)\n team_name = meta.name()\n # Try to get team or create it if it doesn't exist.\n team = next((team for team in gh_repo.get_teams() if team.name == team_name), None)\n current_maintainers = []\n if not team:\n team = create_team(\n org,\n team_name,\n 'The {} {} contributors!'.format(\n choice(superlative), team_name\n )\n )\n team.add_to_repos(gh_repo)\n else:\n current_maintainers = team.get_members()\n\n\n # Add only the new maintainers to the team.\n current_maintainers_handles = set([\n e.login.lower() for e in current_maintainers\n ])\n for new_maintainer in maintainers - current_maintainers_handles:\n add_membership(team, new_maintainer)\n\n # Mention any maintainers that need to be removed (unlikely here).\n for old_maintainer in current_maintainers_handles - maintainers:\n print(\n \"AN OLD MEMBER ({}) NEEDS TO BE REMOVED FROM {}\".format(\n old_maintainer, repo_name\n )\n )\n\n # Get the all-members team\n team_name = 'all-members'\n description = \"All of the awesome {} contributors!\".format(org.name)\n all_members_team = get_cached_team(org, team_name, description)\n new_conda_forge_members = set()\n\n # Add new members to all-members\n for new_member in maintainers - current_maintainers_handles:\n if not has_in_members(all_members_team, new_member):\n print(\n \"Adding a new member ({}) to {}. Welcome! :)\".format(\n new_member, org.name\n )\n )\n add_membership(all_members_team, new_member)\n new_conda_forge_members.add(new_member)\n\n return maintainers, current_maintainers_handles, new_conda_forge_members\n", "path": "conda_smithy/github.py"}]} | 2,603 | 933 |
gh_patches_debug_25159 | rasdani/github-patches | git_diff | mlflow__mlflow-9258 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Typo fix
https://github.com/mlflow/mlflow/blob/9724c83bd8f0100c465e68e30651a9727de42ce0/dev/show_package_release_dates.py#L49
`package_legnth` -> `package_length`
</issue>
<code>
[start of dev/show_package_release_dates.py]
1 import os
2 import json
3 import sys
4 import subprocess
5 import requests
6 from concurrent.futures import ThreadPoolExecutor
7 import traceback
8
9
10 def get_distributions():
11 res = subprocess.check_output(
12 [sys.executable, "-m", "pip", "list", "--format", "json"], text=True
13 )
14 return [(pkg["name"], pkg["version"]) for pkg in json.loads(res)]
15
16
17 def get_release_date(package, version):
18 resp = requests.get(f"https://pypi.python.org/pypi/{package}/json", timeout=10)
19 if not resp.ok:
20 return ""
21
22 matched = [dist_files for ver, dist_files in resp.json()["releases"].items() if ver == version]
23 if (not matched) or (not matched[0]):
24 return ""
25
26 upload_time = matched[0][0]["upload_time"]
27 return upload_time.split("T")[0] # return year-month-day
28
29
30 def get_longest_string_length(array):
31 return len(max(array, key=len))
32
33
34 def safe_result(future, if_error=""):
35 try:
36 return future.result()
37 except Exception:
38 traceback.print_exc()
39 return if_error
40
41
42 def main():
43 distributions = get_distributions()
44 with ThreadPoolExecutor(max_workers=min(32, os.cpu_count() + 4)) as executor:
45 futures = [executor.submit(get_release_date, pkg, ver) for pkg, ver in distributions]
46 release_dates = [safe_result(f) for f in futures]
47
48 packages, versions = list(zip(*distributions))
49 package_legnth = get_longest_string_length(packages)
50 version_length = get_longest_string_length(versions)
51 release_date_length = len("Release Date")
52 print("Package".ljust(package_legnth), "Version".ljust(version_length), "Release Date")
53 print("-" * (package_legnth + version_length + release_date_length + 2))
54 for package, version, release_date in sorted(
55 zip(packages, versions, release_dates),
56 # Sort by release date in descending order
57 key=lambda x: x[2],
58 reverse=True,
59 ):
60 print(
61 package.ljust(package_legnth),
62 version.ljust(version_length),
63 release_date.ljust(release_date_length),
64 )
65
66
67 if __name__ == "__main__":
68 main()
69
[end of dev/show_package_release_dates.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dev/show_package_release_dates.py b/dev/show_package_release_dates.py
--- a/dev/show_package_release_dates.py
+++ b/dev/show_package_release_dates.py
@@ -46,11 +46,11 @@
release_dates = [safe_result(f) for f in futures]
packages, versions = list(zip(*distributions))
- package_legnth = get_longest_string_length(packages)
+ package_length = get_longest_string_length(packages)
version_length = get_longest_string_length(versions)
release_date_length = len("Release Date")
- print("Package".ljust(package_legnth), "Version".ljust(version_length), "Release Date")
- print("-" * (package_legnth + version_length + release_date_length + 2))
+ print("Package".ljust(package_length), "Version".ljust(version_length), "Release Date")
+ print("-" * (package_length + version_length + release_date_length + 2))
for package, version, release_date in sorted(
zip(packages, versions, release_dates),
# Sort by release date in descending order
@@ -58,7 +58,7 @@
reverse=True,
):
print(
- package.ljust(package_legnth),
+ package.ljust(package_length),
version.ljust(version_length),
release_date.ljust(release_date_length),
)
| {"golden_diff": "diff --git a/dev/show_package_release_dates.py b/dev/show_package_release_dates.py\n--- a/dev/show_package_release_dates.py\n+++ b/dev/show_package_release_dates.py\n@@ -46,11 +46,11 @@\n release_dates = [safe_result(f) for f in futures]\n \n packages, versions = list(zip(*distributions))\n- package_legnth = get_longest_string_length(packages)\n+ package_length = get_longest_string_length(packages)\n version_length = get_longest_string_length(versions)\n release_date_length = len(\"Release Date\")\n- print(\"Package\".ljust(package_legnth), \"Version\".ljust(version_length), \"Release Date\")\n- print(\"-\" * (package_legnth + version_length + release_date_length + 2))\n+ print(\"Package\".ljust(package_length), \"Version\".ljust(version_length), \"Release Date\")\n+ print(\"-\" * (package_length + version_length + release_date_length + 2))\n for package, version, release_date in sorted(\n zip(packages, versions, release_dates),\n # Sort by release date in descending order\n@@ -58,7 +58,7 @@\n reverse=True,\n ):\n print(\n- package.ljust(package_legnth),\n+ package.ljust(package_length),\n version.ljust(version_length),\n release_date.ljust(release_date_length),\n )\n", "issue": "Typo fix\nhttps://github.com/mlflow/mlflow/blob/9724c83bd8f0100c465e68e30651a9727de42ce0/dev/show_package_release_dates.py#L49\r\n\r\n`package_legnth` -> `package_length`\n", "before_files": [{"content": "import os\nimport json\nimport sys\nimport subprocess\nimport requests\nfrom concurrent.futures import ThreadPoolExecutor\nimport traceback\n\n\ndef get_distributions():\n res = subprocess.check_output(\n [sys.executable, \"-m\", \"pip\", \"list\", \"--format\", \"json\"], text=True\n )\n return [(pkg[\"name\"], pkg[\"version\"]) for pkg in json.loads(res)]\n\n\ndef get_release_date(package, version):\n resp = requests.get(f\"https://pypi.python.org/pypi/{package}/json\", timeout=10)\n if not resp.ok:\n return \"\"\n\n matched = [dist_files for ver, dist_files in resp.json()[\"releases\"].items() if ver == version]\n if (not matched) or (not matched[0]):\n return \"\"\n\n upload_time = matched[0][0][\"upload_time\"]\n return upload_time.split(\"T\")[0] # return year-month-day\n\n\ndef get_longest_string_length(array):\n return len(max(array, key=len))\n\n\ndef safe_result(future, if_error=\"\"):\n try:\n return future.result()\n except Exception:\n traceback.print_exc()\n return if_error\n\n\ndef main():\n distributions = get_distributions()\n with ThreadPoolExecutor(max_workers=min(32, os.cpu_count() + 4)) as executor:\n futures = [executor.submit(get_release_date, pkg, ver) for pkg, ver in distributions]\n release_dates = [safe_result(f) for f in futures]\n\n packages, versions = list(zip(*distributions))\n package_legnth = get_longest_string_length(packages)\n version_length = get_longest_string_length(versions)\n release_date_length = len(\"Release Date\")\n print(\"Package\".ljust(package_legnth), \"Version\".ljust(version_length), \"Release Date\")\n print(\"-\" * (package_legnth + version_length + release_date_length + 2))\n for package, version, release_date in sorted(\n zip(packages, versions, release_dates),\n # Sort by release date in descending order\n key=lambda x: x[2],\n reverse=True,\n ):\n print(\n package.ljust(package_legnth),\n version.ljust(version_length),\n release_date.ljust(release_date_length),\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "dev/show_package_release_dates.py"}]} | 1,239 | 301 |
gh_patches_debug_27386 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-8360 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Problems with Spiders built on YextSpider
I've noticed a few of the spiders whose results I was using in my OSM tool have recently stopped returning any results, including five_guys_de_es_fr_gb.py, matalan_gb.py, and three_gb.py.
A common feature of these spiders is that they use the YextSpider class. Checking some other spiders that are also based on this class reveals they've also stopped working. The spider stats suggest 404 and 403 errors are preventing the data from being retrieved.
If this is a general problem affecting multiple spiders, would someone be able to take a look and see if it can be fixed?
</issue>
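The golden diff recorded further down in this entry resolves the breakage by migrating the spider from `YextSpider` to the `YextAnswersSpider` store finder. A minimal sketch of the migrated spider follows; the class name, `api_key`, `experience_key`, and the `parse_item(self, location, item)` signature are all taken from that diff, and the shape of the `location["data"]` payload is assumed from it rather than verified against the live Yext Answers API:

```python
from locations.categories import Categories, apply_category
from locations.storefinders.yext_answers import YextAnswersSpider


class IndependentFinancialUSSpider(YextAnswersSpider):
    name = "independent_financial_us"
    item_attributes = {"brand": "Independent Financial", "brand_wikidata": "Q6016398"}
    api_key = "ee4600854cf5501c53831bf944472e57"
    experience_key = "independent-financial-search"  # replaces the old wanted_types list

    def parse_item(self, location, item):
        # the entity type is assumed to sit under location["data"]["type"], as in the diff below
        if location["data"]["type"] == "atm":
            apply_category(Categories.ATM, item)
        elif location["data"]["type"] == "location":
            apply_category(Categories.BANK, item)
        yield item
```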
<code>
[start of locations/spiders/independent_financial_us.py]
1 from locations.categories import Categories, apply_category
2 from locations.storefinders.yext import YextSpider
3
4
5 class IndependentFinancialUSSpider(YextSpider):
6 name = "independent_financial_us"
7 item_attributes = {"brand": "Independent Financial", "brand_wikidata": "Q6016398"}
8 api_key = "ee4600854cf5501c53831bf944472e57"
9 wanted_types = ["location", "atm"]
10
11 def parse_item(self, item, location):
12 if location["meta"]["entityType"] == "location":
13 apply_category(Categories.BANK, item)
14 item["ref"] = location.get("c_branchCode", location["meta"].get("id"))
15 item["name"] = " ".join(filter(None, [location.get("name"), location.get("geomodifier")]))
16 elif location["meta"]["entityType"] == "atm":
17 apply_category(Categories.ATM, item)
18 item["name"] = location.get("geomodifier")
19 item["website"] = location.get("c_pagesURL")
20 item.pop("email", None)
21 item["extras"].pop("contact:instagram", None)
22 item.pop("twitter", None)
23 item.pop("facebook", None)
24 yield item
25
[end of locations/spiders/independent_financial_us.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/independent_financial_us.py b/locations/spiders/independent_financial_us.py
--- a/locations/spiders/independent_financial_us.py
+++ b/locations/spiders/independent_financial_us.py
@@ -1,24 +1,18 @@
from locations.categories import Categories, apply_category
-from locations.storefinders.yext import YextSpider
+from locations.storefinders.yext_answers import YextAnswersSpider
-class IndependentFinancialUSSpider(YextSpider):
+class IndependentFinancialUSSpider(YextAnswersSpider):
name = "independent_financial_us"
item_attributes = {"brand": "Independent Financial", "brand_wikidata": "Q6016398"}
api_key = "ee4600854cf5501c53831bf944472e57"
- wanted_types = ["location", "atm"]
+ experience_key = "independent-financial-search"
- def parse_item(self, item, location):
- if location["meta"]["entityType"] == "location":
- apply_category(Categories.BANK, item)
- item["ref"] = location.get("c_branchCode", location["meta"].get("id"))
- item["name"] = " ".join(filter(None, [location.get("name"), location.get("geomodifier")]))
- elif location["meta"]["entityType"] == "atm":
+ def parse_item(self, location, item):
+ if location["data"]["type"] == "atm":
apply_category(Categories.ATM, item)
- item["name"] = location.get("geomodifier")
- item["website"] = location.get("c_pagesURL")
- item.pop("email", None)
- item["extras"].pop("contact:instagram", None)
- item.pop("twitter", None)
- item.pop("facebook", None)
+ elif location["data"]["type"] == "location":
+ apply_category(Categories.BANK, item)
+ else:
+ self.logger.error("Unknown location type: {}".format(location["data"]["type"]))
yield item
| {"golden_diff": "diff --git a/locations/spiders/independent_financial_us.py b/locations/spiders/independent_financial_us.py\n--- a/locations/spiders/independent_financial_us.py\n+++ b/locations/spiders/independent_financial_us.py\n@@ -1,24 +1,18 @@\n from locations.categories import Categories, apply_category\n-from locations.storefinders.yext import YextSpider\n+from locations.storefinders.yext_answers import YextAnswersSpider\n \n \n-class IndependentFinancialUSSpider(YextSpider):\n+class IndependentFinancialUSSpider(YextAnswersSpider):\n name = \"independent_financial_us\"\n item_attributes = {\"brand\": \"Independent Financial\", \"brand_wikidata\": \"Q6016398\"}\n api_key = \"ee4600854cf5501c53831bf944472e57\"\n- wanted_types = [\"location\", \"atm\"]\n+ experience_key = \"independent-financial-search\"\n \n- def parse_item(self, item, location):\n- if location[\"meta\"][\"entityType\"] == \"location\":\n- apply_category(Categories.BANK, item)\n- item[\"ref\"] = location.get(\"c_branchCode\", location[\"meta\"].get(\"id\"))\n- item[\"name\"] = \" \".join(filter(None, [location.get(\"name\"), location.get(\"geomodifier\")]))\n- elif location[\"meta\"][\"entityType\"] == \"atm\":\n+ def parse_item(self, location, item):\n+ if location[\"data\"][\"type\"] == \"atm\":\n apply_category(Categories.ATM, item)\n- item[\"name\"] = location.get(\"geomodifier\")\n- item[\"website\"] = location.get(\"c_pagesURL\")\n- item.pop(\"email\", None)\n- item[\"extras\"].pop(\"contact:instagram\", None)\n- item.pop(\"twitter\", None)\n- item.pop(\"facebook\", None)\n+ elif location[\"data\"][\"type\"] == \"location\":\n+ apply_category(Categories.BANK, item)\n+ else:\n+ self.logger.error(\"Unknown location type: {}\".format(location[\"data\"][\"type\"]))\n yield item\n", "issue": "Problems with Spiders built on YextSpider\nI've noticed a few of the spiders whose results I was using in my OSM tool have recently stopped returning any results, including five_guys_de_es_fr_gb.py , matalan_gb.py , and three_gb.py .\r\n\r\nA common feature of these spiders is that they use the YextSpider class. Checking some other spiders that are also based on this class, reveals they've also stopped working. 
The spider stats suggest 404 and 403 errors are preventing the data being retrieved.\r\n\r\nIf this is a general problem affecting multiple spiders, would someone be able to take a look and see if it can be fixed?\n", "before_files": [{"content": "from locations.categories import Categories, apply_category\nfrom locations.storefinders.yext import YextSpider\n\n\nclass IndependentFinancialUSSpider(YextSpider):\n name = \"independent_financial_us\"\n item_attributes = {\"brand\": \"Independent Financial\", \"brand_wikidata\": \"Q6016398\"}\n api_key = \"ee4600854cf5501c53831bf944472e57\"\n wanted_types = [\"location\", \"atm\"]\n\n def parse_item(self, item, location):\n if location[\"meta\"][\"entityType\"] == \"location\":\n apply_category(Categories.BANK, item)\n item[\"ref\"] = location.get(\"c_branchCode\", location[\"meta\"].get(\"id\"))\n item[\"name\"] = \" \".join(filter(None, [location.get(\"name\"), location.get(\"geomodifier\")]))\n elif location[\"meta\"][\"entityType\"] == \"atm\":\n apply_category(Categories.ATM, item)\n item[\"name\"] = location.get(\"geomodifier\")\n item[\"website\"] = location.get(\"c_pagesURL\")\n item.pop(\"email\", None)\n item[\"extras\"].pop(\"contact:instagram\", None)\n item.pop(\"twitter\", None)\n item.pop(\"facebook\", None)\n yield item\n", "path": "locations/spiders/independent_financial_us.py"}]} | 1,015 | 477 |
gh_patches_debug_39027 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-140 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Tenseal dependency for HE is not available on ARM aarch64
The tenseal dependency is not available for the ARM aarch64 platform, causing installation to fail. This has been reported for local development on Mac M1 and will affect other non-x86 architectures such as Jetson, Clara AGX, and IBM POWER.
The tenseal dependency is only required when using the HEBuilder module, and it looks like all other functionality could be used without this dependency. Can tenseal be made optional, with the caveat that HE is not available without tenseal?
One option would be providing an alternate install, a `requirements-no-tenseal.txt` that includes everything but tenseal. For example, I generated this file in a clean venv on my linux machine using:
```
pip download nvflare -d /tmp -v \
| grep Collecting \
| awk '{print $2}' \
| tr '[:upper:]' '[:lower:]' \
| grep -v tenseal \
| tee requirements-no-tenseal.txt
```
and verified that I can install nvflare and all deps except tenseal by copying to an aarch64 system (in this case a Jetson TX2) with:
```
python3 -m pip install --no-deps -r requirements-no-tenseal.txt
```
This is a pretty awkward solution. It would be much cleaner to remove the tenseal dependency in the default packaging, since HE is optional, and note in the docs that tenseal must be installed when using HE.
</issue>
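The golden diff further down in this entry takes exactly the optional-dependency route suggested above: `tenseal==0.3.0` moves out of `install_requires` into `extras_require={"HE": [...]}` (with standard setuptools behaviour, HE users would then install it via `pip install nvflare[HE]`), and the import in `nvflare/lighter/impl/he.py` is guarded so provisioning still runs without it. Below is a minimal, self-contained sketch of the import-guard half of that pattern; `he_available` is an illustrative helper for this sketch only, not part of NVFlare:

```python
# Degrade gracefully when the optional tenseal dependency is missing,
# mirroring the guard the golden diff adds to nvflare/lighter/impl/he.py.
try:
    import tenseal as ts  # only present when the optional HE extra is installed
except ImportError:
    ts = None


def he_available() -> bool:
    """Return True when homomorphic-encryption support can actually be used."""
    return ts is not None


if __name__ == "__main__":
    if he_available():
        print("tenseal found; HE contexts can be generated")
    else:
        print("tenseal not installed; skipping HE provisioning")
```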
<code>
[start of setup.py]
1 # Copyright (c) 2021, NVIDIA CORPORATION.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
16 import os
17 import shutil
18 from datetime import datetime
19
20 from setuptools import find_packages, setup
21
22 import versioneer
23 # read the contents of your README file
24 this_directory = os.path.abspath(os.path.dirname(__file__))
25 with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f:
26 long_description = f.read()
27
28 # with open(os.path.join(this_directory, "nvflare", "__init__.py"), encoding="utf-8") as f:
29 # for line in f.readlines():
30 # if "__version__" in line:
31 # init_version = line.split("=")[1].strip().strip('"')
32 #
33 # nvfl_version = os.environ.get("NVFL_VERSION", init_version)
34 # yymmdd = datetime.today().strftime("%y%m%d")
35 # nvfl_nightly_version = f"{nvfl_version}.dev{yymmdd}"
36 #
37 # if os.environ.get("NVFL_RELEASE"):
38 # package_name = "nvflare"
39 # version = nvfl_version
40 # else:
41 # package_name = "nvflare-nightly"
42 # version = nvfl_nightly_version
43 #
44
45 if os.path.exists(os.path.join(this_directory, "nvflare", "poc.zip")):
46 os.remove(os.path.join(this_directory, "nvflare", "poc.zip"))
47 shutil.make_archive(base_name="poc", format="zip", root_dir=os.path.join(this_directory, "nvflare"), base_dir="poc")
48 shutil.move("poc.zip", os.path.join(this_directory, "nvflare", "poc.zip"))
49 package_name = "nvflare"
50
51 setup(
52 name=package_name,
53 version=versioneer.get_version(),
54 cmdclass=versioneer.get_cmdclass(),
55 description="Federated Learning Application Runtime Environment",
56 url="https://github.com/NVIDIA/NVFlare",
57 package_dir={"nvflare": "nvflare"},
58 packages=find_packages(
59 where=".",
60 include=[
61 "*",
62 ],
63 exclude=[
64 "test",
65 ],
66 ),
67 package_data={"": ["*.yml", "*.html", "poc.zip"]},
68 zip_safe=True,
69 license_files=("LICENSE.pdf",),
70 classifiers=[
71 "Programming Language :: Python :: 3",
72 "License :: Other/Proprietary License",
73 "Operating System :: OS Independent",
74 ],
75 long_description=long_description,
76 long_description_content_type="text/markdown",
77 python_requires=">=3.7",
78 # install_requires=list(pkutils.parse_requirements("requirements.txt")),
79 install_requires=[
80 "PyYAML",
81 "psutil",
82 "numpy",
83 "grpcio",
84 "google-api-python-client",
85 "cryptography",
86 "tenseal==0.3.0",
87 ],
88 entry_points={
89 "console_scripts": [
90 "provision=nvflare.lighter.provision:main",
91 "poc=nvflare.lighter.poc:main",
92 "authz_preview=nvflare.fuel.hci.tools.authz_preview:main",
93 ],
94 },
95 )
96
97 os.remove(os.path.join(this_directory, "nvflare", "poc.zip"))
98
[end of setup.py]
[start of nvflare/lighter/impl/he.py]
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16
17 import tenseal as ts
18
19 from nvflare.lighter.spec import Builder
20
21
22 class HEBuilder(Builder):
23 def __init__(
24 self,
25 poly_modulus_degree=8192,
26 coeff_mod_bit_sizes=[60, 40, 40],
27 scale_bits=40,
28 scheme="CKKS",
29 ):
30 """Build Homomorphic related contents.
31
32 Generates Tenseal homomorphic encryption context for server and client and writes them to server and client
33 participant folders.
34
35 Args:
36 poly_modulus_degree: defaults to 8192.
37 coeff_mod_bit_sizes: defaults to [60, 40, 40].
38 scale_bits: defaults to 40.
39 scheme: defaults to "CKKS".
40 """
41 self._context = None
42 self.scheme_type_mapping = {
43 "CKKS": ts.SCHEME_TYPE.CKKS,
44 "BFV": ts.SCHEME_TYPE.BFV,
45 }
46 self.poly_modulus_degree = poly_modulus_degree
47 self.coeff_mod_bit_sizes = coeff_mod_bit_sizes
48 self.scale_bits = scale_bits
49 _scheme = scheme
50 # Setup TenSEAL context
51 self.scheme_type = self.scheme_type_mapping[_scheme]
52 self.serialized = None
53
54 def initialize(self, ctx):
55 self._context = ts.context(
56 self.scheme_type,
57 poly_modulus_degree=self.poly_modulus_degree,
58 coeff_mod_bit_sizes=self.coeff_mod_bit_sizes,
59 encryption_type=ts.ENCRYPTION_TYPE.SYMMETRIC,
60 )
61 # dynamically call different generate keys method
62 # getattr(self._context, f'generate_{self.key_type}_keys')()
63 self._context.generate_relin_keys()
64 self._context.global_scale = 2 ** self.scale_bits
65
66 def build(self, study, ctx):
67 server = study.get_participants_by_type("server")
68 dest_dir = self.get_kit_dir(server, ctx)
69 with open(os.path.join(dest_dir, "server_context.tenseal"), "wb") as f:
70 f.write(self.get_serialized_context())
71 for client in study.get_participants_by_type("client", first_only=False):
72 dest_dir = self.get_kit_dir(client, ctx)
73 with open(os.path.join(dest_dir, "client_context.tenseal"), "wb") as f:
74 f.write(self.get_serialized_context(is_client=True))
75
76 def get_serialized_context(self, is_client=False):
77 _serialized_context = self._context.serialize(
78 save_public_key=is_client,
79 save_secret_key=is_client,
80 save_galois_keys=False,
81 save_relin_keys=True,
82 )
83 return _serialized_context
84
[end of nvflare/lighter/impl/he.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nvflare/lighter/impl/he.py b/nvflare/lighter/impl/he.py
--- a/nvflare/lighter/impl/he.py
+++ b/nvflare/lighter/impl/he.py
@@ -14,7 +14,10 @@
import os
-import tenseal as ts
+try:
+ import tenseal as ts
+except ImportError:
+ ts = None
from nvflare.lighter.spec import Builder
@@ -38,6 +41,11 @@
scale_bits: defaults to 40.
scheme: defaults to "CKKS".
"""
+ if ts is None:
+ print(
+ "\n *** tenseal is not installed. HEBuilder is ignored and no tenseal files will be generated. ***\n"
+ )
+ return
self._context = None
self.scheme_type_mapping = {
"CKKS": ts.SCHEME_TYPE.CKKS,
@@ -52,6 +60,8 @@
self.serialized = None
def initialize(self, ctx):
+ if ts is None:
+ return
self._context = ts.context(
self.scheme_type,
poly_modulus_degree=self.poly_modulus_degree,
@@ -64,6 +74,8 @@
self._context.global_scale = 2 ** self.scale_bits
def build(self, study, ctx):
+ if ts is None:
+ return
server = study.get_participants_by_type("server")
dest_dir = self.get_kit_dir(server, ctx)
with open(os.path.join(dest_dir, "server_context.tenseal"), "wb") as f:
@@ -74,6 +86,8 @@
f.write(self.get_serialized_context(is_client=True))
def get_serialized_context(self, is_client=False):
+ if ts is None:
+ return
_serialized_context = self._context.serialize(
save_public_key=is_client,
save_secret_key=is_client,
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -76,15 +76,8 @@
long_description_content_type="text/markdown",
python_requires=">=3.7",
# install_requires=list(pkutils.parse_requirements("requirements.txt")),
- install_requires=[
- "PyYAML",
- "psutil",
- "numpy",
- "grpcio",
- "google-api-python-client",
- "cryptography",
- "tenseal==0.3.0",
- ],
+ install_requires=["PyYAML", "psutil", "numpy", "grpcio", "google-api-python-client", "cryptography"],
+ extras_require={"HE": ["tenseal==0.3.0"]},
entry_points={
"console_scripts": [
"provision=nvflare.lighter.provision:main",
| {"golden_diff": "diff --git a/nvflare/lighter/impl/he.py b/nvflare/lighter/impl/he.py\n--- a/nvflare/lighter/impl/he.py\n+++ b/nvflare/lighter/impl/he.py\n@@ -14,7 +14,10 @@\n \n import os\n \n-import tenseal as ts\n+try:\n+ import tenseal as ts\n+except ImportError:\n+ ts = None\n \n from nvflare.lighter.spec import Builder\n \n@@ -38,6 +41,11 @@\n scale_bits: defaults to 40.\n scheme: defaults to \"CKKS\".\n \"\"\"\n+ if ts is None:\n+ print(\n+ \"\\n *** tenseal is not installed. HEBuilder is ignored and no tenseal files will be generated. ***\\n\"\n+ )\n+ return\n self._context = None\n self.scheme_type_mapping = {\n \"CKKS\": ts.SCHEME_TYPE.CKKS,\n@@ -52,6 +60,8 @@\n self.serialized = None\n \n def initialize(self, ctx):\n+ if ts is None:\n+ return\n self._context = ts.context(\n self.scheme_type,\n poly_modulus_degree=self.poly_modulus_degree,\n@@ -64,6 +74,8 @@\n self._context.global_scale = 2 ** self.scale_bits\n \n def build(self, study, ctx):\n+ if ts is None:\n+ return\n server = study.get_participants_by_type(\"server\")\n dest_dir = self.get_kit_dir(server, ctx)\n with open(os.path.join(dest_dir, \"server_context.tenseal\"), \"wb\") as f:\n@@ -74,6 +86,8 @@\n f.write(self.get_serialized_context(is_client=True))\n \n def get_serialized_context(self, is_client=False):\n+ if ts is None:\n+ return\n _serialized_context = self._context.serialize(\n save_public_key=is_client,\n save_secret_key=is_client,\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -76,15 +76,8 @@\n long_description_content_type=\"text/markdown\",\n python_requires=\">=3.7\",\n # install_requires=list(pkutils.parse_requirements(\"requirements.txt\")),\n- install_requires=[\n- \"PyYAML\",\n- \"psutil\",\n- \"numpy\",\n- \"grpcio\",\n- \"google-api-python-client\",\n- \"cryptography\",\n- \"tenseal==0.3.0\",\n- ],\n+ install_requires=[\"PyYAML\", \"psutil\", \"numpy\", \"grpcio\", \"google-api-python-client\", \"cryptography\"],\n+ extras_require={\"HE\": [\"tenseal==0.3.0\"]},\n entry_points={\n \"console_scripts\": [\n \"provision=nvflare.lighter.provision:main\",\n", "issue": "Tenseal dependency for HE is not available on ARM aarch64\nThe tenseal dependency is not available for the ARM aarch64 platform, causing installation to fail. This has been reported for local development on Mac M1 and will affect other non-x86 architectures, Jetson, Clara AGX, IBM POWER, etc..\r\n\r\nThe tenseal dependency is only required when using the HEBuilder module, and it looks like all other functionality could be used without this dependency. Can tenseal be made optional, with the caveat that HE is not available without tenseal?\r\n\r\nOne option would be providing an alternate install, a `requirements-no-tenseal.txt` that includes everything but tenseal. For example, I generated this file in a clean venv on my linux machine using:\r\n```\r\npip download nvflare -d /tmp -v \\\r\n | grep Collecting \\\r\n | awk '{print $2}' \\\r\n | tr '[:upper:]' '[:lower:]' \\\r\n | grep -v tenseal \\\r\n | tee requirements-no-tenseal.txt\r\n```\r\nand verified that I can install nvflare and all deps except tenseal by copying to an aarch64 system (in this case a Jetson TX2) with:\r\n```\r\npython3 -m pip install --no-deps -r requirements-no-tenseal.txt\r\n```\r\n\r\nThis is a pretty awkward solution. 
It would be much cleaner to remove the tenseal dependency in the default packaging, since HE is optional, and note in the docs that tenseal must be installed when using HE.\n", "before_files": [{"content": "# Copyright (c) 2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.\nimport os\nimport shutil\nfrom datetime import datetime\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n# read the contents of your README file\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\n# with open(os.path.join(this_directory, \"nvflare\", \"__init__.py\"), encoding=\"utf-8\") as f:\n# for line in f.readlines():\n# if \"__version__\" in line:\n# init_version = line.split(\"=\")[1].strip().strip('\"')\n# \n# nvfl_version = os.environ.get(\"NVFL_VERSION\", init_version)\n# yymmdd = datetime.today().strftime(\"%y%m%d\")\n# nvfl_nightly_version = f\"{nvfl_version}.dev{yymmdd}\"\n# \n# if os.environ.get(\"NVFL_RELEASE\"):\n# package_name = \"nvflare\"\n# version = nvfl_version\n# else:\n# package_name = \"nvflare-nightly\"\n# version = nvfl_nightly_version\n# \n\nif os.path.exists(os.path.join(this_directory, \"nvflare\", \"poc.zip\")):\n os.remove(os.path.join(this_directory, \"nvflare\", \"poc.zip\"))\nshutil.make_archive(base_name=\"poc\", format=\"zip\", root_dir=os.path.join(this_directory, \"nvflare\"), base_dir=\"poc\")\nshutil.move(\"poc.zip\", os.path.join(this_directory, \"nvflare\", \"poc.zip\"))\npackage_name = \"nvflare\"\n\nsetup(\n name=package_name,\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Federated Learning Application Runtime Environment\",\n url=\"https://github.com/NVIDIA/NVFlare\",\n package_dir={\"nvflare\": \"nvflare\"},\n packages=find_packages(\n where=\".\",\n include=[\n \"*\",\n ],\n exclude=[\n \"test\",\n ],\n ),\n package_data={\"\": [\"*.yml\", \"*.html\", \"poc.zip\"]},\n zip_safe=True,\n license_files=(\"LICENSE.pdf\",),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: Other/Proprietary License\",\n \"Operating System :: OS Independent\",\n ],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n python_requires=\">=3.7\",\n # install_requires=list(pkutils.parse_requirements(\"requirements.txt\")),\n install_requires=[\n \"PyYAML\",\n \"psutil\",\n \"numpy\",\n \"grpcio\",\n \"google-api-python-client\",\n \"cryptography\",\n \"tenseal==0.3.0\",\n ],\n entry_points={\n \"console_scripts\": [\n \"provision=nvflare.lighter.provision:main\",\n \"poc=nvflare.lighter.poc:main\",\n \"authz_preview=nvflare.fuel.hci.tools.authz_preview:main\",\n ],\n },\n)\n\nos.remove(os.path.join(this_directory, \"nvflare\", \"poc.zip\"))\n", "path": "setup.py"}, {"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport tenseal as ts\n\nfrom nvflare.lighter.spec import Builder\n\n\nclass HEBuilder(Builder):\n def __init__(\n self,\n poly_modulus_degree=8192,\n coeff_mod_bit_sizes=[60, 40, 40],\n scale_bits=40,\n scheme=\"CKKS\",\n ):\n \"\"\"Build Homomorphic related contents.\n\n Generates Tenseal homomorphic encryption context for server and client and writes them to server and client\n participant folders.\n\n Args:\n poly_modulus_degree: defaults to 8192.\n coeff_mod_bit_sizes: defaults to [60, 40, 40].\n scale_bits: defaults to 40.\n scheme: defaults to \"CKKS\".\n \"\"\"\n self._context = None\n self.scheme_type_mapping = {\n \"CKKS\": ts.SCHEME_TYPE.CKKS,\n \"BFV\": ts.SCHEME_TYPE.BFV,\n }\n self.poly_modulus_degree = poly_modulus_degree\n self.coeff_mod_bit_sizes = coeff_mod_bit_sizes\n self.scale_bits = scale_bits\n _scheme = scheme\n # Setup TenSEAL context\n self.scheme_type = self.scheme_type_mapping[_scheme]\n self.serialized = None\n\n def initialize(self, ctx):\n self._context = ts.context(\n self.scheme_type,\n poly_modulus_degree=self.poly_modulus_degree,\n coeff_mod_bit_sizes=self.coeff_mod_bit_sizes,\n encryption_type=ts.ENCRYPTION_TYPE.SYMMETRIC,\n )\n # dynamically call different generate keys method\n # getattr(self._context, f'generate_{self.key_type}_keys')()\n self._context.generate_relin_keys()\n self._context.global_scale = 2 ** self.scale_bits\n\n def build(self, study, ctx):\n server = study.get_participants_by_type(\"server\")\n dest_dir = self.get_kit_dir(server, ctx)\n with open(os.path.join(dest_dir, \"server_context.tenseal\"), \"wb\") as f:\n f.write(self.get_serialized_context())\n for client in study.get_participants_by_type(\"client\", first_only=False):\n dest_dir = self.get_kit_dir(client, ctx)\n with open(os.path.join(dest_dir, \"client_context.tenseal\"), \"wb\") as f:\n f.write(self.get_serialized_context(is_client=True))\n\n def get_serialized_context(self, is_client=False):\n _serialized_context = self._context.serialize(\n save_public_key=is_client,\n save_secret_key=is_client,\n save_galois_keys=False,\n save_relin_keys=True,\n )\n return _serialized_context\n", "path": "nvflare/lighter/impl/he.py"}]} | 2,771 | 632 |
gh_patches_debug_18081 | rasdani/github-patches | git_diff | jupyterhub__zero-to-jupyterhub-k8s-31 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
add link to github repo on website
I think there should be an obvious link on the website to the repo / issue tracker so that people can leave feedback more easily.
</issue>
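The golden diff at the end of this entry adds the link through the Alabaster theme rather than through page content: `html_theme_options` gains the `github_user`/`github_repo` fields and an intro sidebar is registered for the landing page. A short sketch of the resulting `conf.py` fragment, with the option values copied from that diff and `sidebarintro.html` assumed to exist under the project's `_templates/` directory:

```python
# doc/source/conf.py (fragment) -- sketch of the change applied by the golden diff below
html_theme = 'alabaster'

html_theme_options = {
    'show_powered_by': False,
    'github_user': 'jupyterhub',
    'github_repo': 'zero-to-jupyterhub-k8s',
    'github_banner': False,
    'show_related': False,
}

# custom intro sidebar registered for the index page (template assumed to live in _templates/)
html_sidebars = {
    'index': ['sidebarintro.html'],
}
```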
<code>
[start of doc/source/conf.py]
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # Learning with JupyterHub documentation build configuration file, created by
5 # sphinx-quickstart on Fri Mar 17 16:07:58 2017.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19 #
20 # import os
21 # import sys
22 # sys.path.insert(0, os.path.abspath('.'))
23 import recommonmark
24
25 # -- General configuration ------------------------------------------------
26
27 # If your documentation needs a minimal Sphinx version, state it here.
28 #
29 # needs_sphinx = '1.0'
30
31 # Add any Sphinx extension module names here, as strings. They can be
32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 # ones.
34 extensions = ['sphinx.ext.mathjax']
35
36 # Add any paths that contain templates here, relative to this directory.
37 templates_path = ['_templates']
38
39 source_parsers = {
40 '.md': 'recommonmark.parser.CommonMarkParser',
41 }
42
43 # The suffix(es) of source filenames.
44 # You can specify multiple suffix as a list of string:
45 #
46 # source_suffix = ['.rst', '.md']
47 source_suffix = ['.rst', '.md']
48
49 # The master toctree document.
50 master_doc = 'index'
51
52 # General information about the project.
53 project = u'Zero to JupyterHub with Kubernetes'
54 copyright = u'2017, Project Jupyter team'
55 author = u'Project Jupyter team'
56
57 # The version info for the project you're documenting, acts as replacement for
58 # |version| and |release|, also used in various other places throughout the
59 # built documents.
60 #
61 # The short X.Y version.
62 version = '0.1'
63 # The full version, including alpha/beta/rc tags.
64 release = '0.1'
65
66 # The language for content autogenerated by Sphinx. Refer to documentation
67 # for a list of supported languages.
68 #
69 # This is also used if you do content translation via gettext catalogs.
70 # Usually you set "language" from the command line for these cases.
71 language = None
72
73 # List of patterns, relative to source directory, that match files and
74 # directories to ignore when looking for source files.
75 # This patterns also effect to html_static_path and html_extra_path
76 exclude_patterns = []
77
78 # The name of the Pygments (syntax highlighting) style to use.
79 pygments_style = 'sphinx'
80
81 # If true, `todo` and `todoList` produce output, else they produce nothing.
82 todo_include_todos = False
83
84
85 # -- Options for HTML output ----------------------------------------------
86
87 # The theme to use for HTML and HTML Help pages. See the documentation for
88 # a list of builtin themes.
89 #
90 html_theme = 'alabaster'
91 html_favicon = '_static/images/logo/favicon.ico'
92 html_logo = '_static/images/logo/logo.png'
93
94 # Theme options are theme-specific and customize the look and feel of a theme
95 # further. For a list of options available for each theme, see the
96 # documentation.
97 #
98 # html_theme_options = {}
99
100 # Add any paths that contain custom static files (such as style sheets) here,
101 # relative to this directory. They are copied after the builtin static files,
102 # so a file named "default.css" will overwrite the builtin "default.css".
103 html_static_path = ['_static']
104
105
106 # -- Options for HTMLHelp output ------------------------------------------
107
108 # Output file base name for HTML help builder.
109 htmlhelp_basename = 'ZeroToJupyterhubDoc'
110
111
112 # -- Options for LaTeX output ---------------------------------------------
113
114 latex_elements = {
115 # The paper size ('letterpaper' or 'a4paper').
116 #
117 # 'papersize': 'letterpaper',
118
119 # The font size ('10pt', '11pt' or '12pt').
120 #
121 # 'pointsize': '10pt',
122
123 # Additional stuff for the LaTeX preamble.
124 #
125 # 'preamble': '',
126
127 # Latex figure (float) alignment
128 #
129 # 'figure_align': 'htbp',
130 }
131
132 # Grouping the document tree into LaTeX files. List of tuples
133 # (source start file, target name, title,
134 # author, documentclass [howto, manual, or own class]).
135 latex_documents = [
136 (master_doc, 'ZeroToJupyterhubDoc.tex', 'Zero to JupyterHub',
137 'Chris Holdgraf', 'manual'),
138 ]
139
140
141 # -- Options for manual page output ---------------------------------------
142
143 # One entry per manual page. List of tuples
144 # (source start file, name, description, authors, manual section).
145 man_pages = [
146 (master_doc, 'zerotojupyterhub', 'Zero to JupyterHub',
147 [author], 1)
148 ]
149
150
151 # -- Options for Texinfo output -------------------------------------------
152
153 # Grouping the document tree into Texinfo files. List of tuples
154 # (source start file, target name, title, author,
155 # dir menu entry, description, category)
156 texinfo_documents = [
157 (master_doc, 'ZeroToJupyterhubDoc', 'Zero to JupyterHub',
158 author, 'ZeroToJupyterhubDoc', 'One line description of project.',
159 'Miscellaneous'),
160 ]
161
162
163
164 # -- Options for Epub output ----------------------------------------------
165
166 # Bibliographic Dublin Core info.
167 epub_title = project
168 epub_author = author
169 epub_publisher = author
170 epub_copyright = copyright
171
172 # The unique identifier of the text. This can be a ISBN number
173 # or the project homepage.
174 #
175 # epub_identifier = ''
176
177 # A unique identification for the text.
178 #
179 # epub_uid = ''
180
181 # A list of files that should not be packed into the epub file.
182 epub_exclude_files = ['search.html']
183
[end of doc/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/doc/source/conf.py b/doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -95,13 +95,23 @@
# further. For a list of options available for each theme, see the
# documentation.
#
-# html_theme_options = {}
+html_theme_options = {
+ 'show_powered_by': False,
+ 'github_user': 'jupyterhub',
+ 'github_repo': 'zero-to-jupyterhub-k8s',
+ 'github_banner': False,
+ 'show_related': False
+}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
+html_sidebars = {
+ 'index': ['sidebarintro.html'],
+}
+
# -- Options for HTMLHelp output ------------------------------------------
| {"golden_diff": "diff --git a/doc/source/conf.py b/doc/source/conf.py\n--- a/doc/source/conf.py\n+++ b/doc/source/conf.py\n@@ -95,13 +95,23 @@\n # further. For a list of options available for each theme, see the\n # documentation.\n #\n-# html_theme_options = {}\n+html_theme_options = {\n+ 'show_powered_by': False,\n+ 'github_user': 'jupyterhub',\n+ 'github_repo': 'zero-to-jupyterhub-k8s',\n+ 'github_banner': False,\n+ 'show_related': False\n+}\n \n # Add any paths that contain custom static files (such as style sheets) here,\n # relative to this directory. They are copied after the builtin static files,\n # so a file named \"default.css\" will overwrite the builtin \"default.css\".\n html_static_path = ['_static']\n \n+html_sidebars = {\n+ 'index': ['sidebarintro.html'],\n+}\n+\n \n # -- Options for HTMLHelp output ------------------------------------------\n", "issue": "add link to github repo on website\nI think there should be an obvious link on the website to the repo / issue tracker so that people can leave feedback more easily.\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Learning with JupyterHub documentation build configuration file, created by\n# sphinx-quickstart on Fri Mar 17 16:07:58 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nimport recommonmark\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.mathjax']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\nsource_parsers = {\n '.md': 'recommonmark.parser.CommonMarkParser',\n}\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'Zero to JupyterHub with Kubernetes'\ncopyright = u'2017, Project Jupyter team'\nauthor = u'Project Jupyter team'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '0.1'\n# The full version, including alpha/beta/rc tags.\nrelease = '0.1'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'alabaster'\nhtml_favicon = '_static/images/logo/favicon.ico'\nhtml_logo = '_static/images/logo/logo.png'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ZeroToJupyterhubDoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'ZeroToJupyterhubDoc.tex', 'Zero to JupyterHub',\n 'Chris Holdgraf', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'zerotojupyterhub', 'Zero to JupyterHub',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'ZeroToJupyterhubDoc', 'Zero to JupyterHub',\n author, 'ZeroToJupyterhubDoc', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n\n# -- Options for Epub output ----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n", "path": "doc/source/conf.py"}]} | 2,327 | 219 |
gh_patches_debug_950 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-2204 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
torch.div() (1.6.0) does not have 'rounding_mode' parameter
According to the torch 1.6.0 documentation here: https://pytorch.org/docs/1.6.0/generated/torch.div.html?highlight=torch%20div#torch.div
there is no 'rounding_mode' parameter.
But in the translator:
https://github.com/OpenNMT/OpenNMT-py/blob/0f411ce11a83b18c0223ac94ccc11a35403763df/onmt/translate/beam_search.py#L282
That's why I receive this error:
```
onmt_translate -model ./../output/test/nmt/f0/run/model_step_100.pt -src ./../output/test/nmt/f0/src-test.txt -output ./../output/test/nmt/f0/test.epoch100.pred.csv -gpu 0 --min_length 2 -verbose
[2022-09-15 20:32:19,980 INFO] Translating shard 0.
Traceback (most recent call last):
File "c:\programdata\anaconda3\envs\nuecg\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "c:\programdata\anaconda3\envs\nuecg\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\ProgramData\Anaconda3\envs\nuecg\Scripts\onmt_translate.exe\__main__.py", line 7, in <module>
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\bin\translate.py", line 54, in main
translate(opt)
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\bin\translate.py", line 38, in translate
align_debug=opt.align_debug
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\translator.py", line 440, in translate
phrase_table=phrase_table)
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\translator.py", line 487, in _translate
batch, data.src_vocabs, attn_debug
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\translator.py", line 861, in translate_batch
batch, src_vocabs, decode_strategy
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\translator.py", line 947, in _translate_batch_with_strategy
decode_strategy.advance(log_probs, attn)
File "c:\programdata\anaconda3\envs\nuecg\lib\site-packages\onmt\translate\beam_search.py", line 283, in advance
rounding_mode='trunc')
TypeError: div() got an unexpected keyword argument 'rounding_mode'
```
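For context, `rounding_mode` was only added to `torch.div` in newer releases (1.8 and later, as far as I can tell), so on 1.6 the call above cannot succeed. A minimal sketch of an equivalent truncating division for the non-negative indices involved (tensor and variable names here are made up for illustration, not taken from `beam_search.py`):
```python
import torch

topk_ids = torch.tensor([5, 17, 42])
vocab_size = 8

# torch.div(topk_ids, vocab_size, rounding_mode='trunc') needs torch >= 1.8;
# for non-negative integers, floor division gives the same result on 1.6.
beam_origin = topk_ids // vocab_size
print(beam_origin)  # tensor([0, 2, 5])
```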
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 from setuptools import setup, find_packages
3 from os import path
4
5 this_directory = path.abspath(path.dirname(__file__))
6 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
7 long_description = f.read()
8
9 setup(
10 name='OpenNMT-py',
11 description='A python implementation of OpenNMT',
12 long_description=long_description,
13 long_description_content_type='text/markdown',
14 version='2.3.0',
15 packages=find_packages(),
16 project_urls={
17 "Documentation": "http://opennmt.net/OpenNMT-py/",
18 "Forum": "http://forum.opennmt.net/",
19 "Gitter": "https://gitter.im/OpenNMT/OpenNMT-py",
20 "Source": "https://github.com/OpenNMT/OpenNMT-py/"
21 },
22 python_requires=">=3.5",
23 install_requires=[
24 "torch>=1.6.0",
25 "torchtext==0.5.0",
26 "configargparse",
27 "tensorboard>=2.3",
28 "flask",
29 "waitress",
30 "pyonmttok>=1.23,<2",
31 "pyyaml",
32 "sacrebleu"
33 ],
34 entry_points={
35 "console_scripts": [
36 "onmt_server=onmt.bin.server:main",
37 "onmt_train=onmt.bin.train:main",
38 "onmt_translate=onmt.bin.translate:main",
39 "onmt_translate_dynamic=onmt.bin.translate_dynamic:main",
40 "onmt_release_model=onmt.bin.release_model:main",
41 "onmt_average_models=onmt.bin.average_models:main",
42 "onmt_build_vocab=onmt.bin.build_vocab:main"
43 ],
44 }
45 )
46
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@
},
python_requires=">=3.5",
install_requires=[
- "torch>=1.6.0",
+ "torch>=1.9.0",
"torchtext==0.5.0",
"configargparse",
"tensorboard>=2.3",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -21,7 +21,7 @@\n },\n python_requires=\">=3.5\",\n install_requires=[\n- \"torch>=1.6.0\",\n+ \"torch>=1.9.0\",\n \"torchtext==0.5.0\",\n \"configargparse\",\n \"tensorboard>=2.3\",\n", "issue": "torch.div() (1.6.0) does not have 'rounding_mode' parameter\nAccording to the torch 1.6.0 here: https://pytorch.org/docs/1.6.0/generated/torch.div.html?highlight=torch%20div#torch.div\r\nthere is no 'rounding_mode' parameter. \r\n\r\nBut in translator:\r\nhttps://github.com/OpenNMT/OpenNMT-py/blob/0f411ce11a83b18c0223ac94ccc11a35403763df/onmt/translate/beam_search.py#L282\r\n\r\nThat's why I receive this error:\r\n```\r\nonmt_translate -model ./../output/test/nmt/f0/run/model_step_100.pt -src ./../output/test/nmt/f0/src-test.txt -output ./../output/test/nmt/f0/test.epoch100.pred.csv -gpu 0 --min_length 2 -verbose \r\n[2022-09-15 20:32:19,980 INFO] Translating shard 0.\r\nTraceback (most recent call last):\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\nuecg\\Scripts\\onmt_translate.exe\\__main__.py\", line 7, in <module>\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\bin\\translate.py\", line 54, in main\r\n translate(opt)\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\bin\\translate.py\", line 38, in translate\r\n align_debug=opt.align_debug\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\translate\\translator.py\", line 440, in translate\r\n phrase_table=phrase_table)\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\translate\\translator.py\", line 487, in _translate\r\n batch, data.src_vocabs, attn_debug\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\translate\\translator.py\", line 861, in translate_batch\r\n batch, src_vocabs, decode_strategy\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\translate\\translator.py\", line 947, in _translate_batch_with_strategy\r\n decode_strategy.advance(log_probs, attn)\r\n File \"c:\\programdata\\anaconda3\\envs\\nuecg\\lib\\site-packages\\onmt\\translate\\beam_search.py\", line 283, in advance\r\n rounding_mode='trunc')\r\nTypeError: div() got an unexpected keyword argument 'rounding_mode'\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom setuptools import setup, find_packages\nfrom os import path\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='OpenNMT-py',\n description='A python implementation of OpenNMT',\n long_description=long_description,\n long_description_content_type='text/markdown',\n version='2.3.0',\n packages=find_packages(),\n project_urls={\n \"Documentation\": \"http://opennmt.net/OpenNMT-py/\",\n \"Forum\": \"http://forum.opennmt.net/\",\n \"Gitter\": \"https://gitter.im/OpenNMT/OpenNMT-py\",\n \"Source\": \"https://github.com/OpenNMT/OpenNMT-py/\"\n },\n python_requires=\">=3.5\",\n install_requires=[\n \"torch>=1.6.0\",\n \"torchtext==0.5.0\",\n \"configargparse\",\n \"tensorboard>=2.3\",\n \"flask\",\n \"waitress\",\n \"pyonmttok>=1.23,<2\",\n \"pyyaml\",\n \"sacrebleu\"\n ],\n 
entry_points={\n \"console_scripts\": [\n \"onmt_server=onmt.bin.server:main\",\n \"onmt_train=onmt.bin.train:main\",\n \"onmt_translate=onmt.bin.translate:main\",\n \"onmt_translate_dynamic=onmt.bin.translate_dynamic:main\",\n \"onmt_release_model=onmt.bin.release_model:main\",\n \"onmt_average_models=onmt.bin.average_models:main\",\n \"onmt_build_vocab=onmt.bin.build_vocab:main\"\n ],\n }\n)\n", "path": "setup.py"}]} | 1,734 | 95 |
gh_patches_debug_9160 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3313 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider ymca is broken
During the global build at 2021-06-02-14-42-40, spider **ymca** failed with **0 features** and **87 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/ymca.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/ymca.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/ymca.geojson))
</issue>
<code>
[start of locations/spiders/ymca.py]
1 # -*- coding: utf-8 -*-
2 from datetime import datetime
3 import json
4 import re
5 from urllib.parse import urlencode
6
7 import scrapy
8
9 from locations.items import GeojsonPointItem
10 from locations.hours import OpeningHours
11
12 SINGLE_POINT_STATES = [
13 ("0,64.0685,-152.2782,AK"),
14 ("1,20.6538883744,-157.8631750471,HI"),
15 ]
16
17 HUNDRED_MILES_STATES = {"MT", "WY", "SD", "ND", "NE", "NV", "AZ", "NM", "UT", "ID"}
18 TWENTYFIVE_MILES_STATES = {"MD", "OH", "FL", "IL", "IA", "WI", "MN", "RI", "MA", "NH",
19 "SC", "NC", "NJ", "WA", "CA", "PA", "NY"}
20 ADDITONAL_CITIES = [
21 "Los Angeles, CA",
22 "New York, NY",
23 "Boston, MA",
24 "Philadelphia, PA",
25 "Dallas, TX",
26 "Houston, TX",
27 "Seattle, WA",
28 "San Francisco, CA",
29 "Denver, CO",
30 "Minneapolis, MN",
31 "Omaha, NE",
32 "St. Louis, MO",
33 "Chicago, IL",
34 "Montgomery, AL",
35 "Orlando, FL",
36 "St. Petersburg, FL",
37 "Atlanta, GA",
38 "Poughkeepsie, NY",
39 "Hartford, CT",
40 "Concord, NH"
41 ]
42
43
44 class YmcaSpider(scrapy.Spider):
45 name = "ymca"
46 item_attributes = { 'brand': "YMCA" }
47 allowed_domains = ["ymca.net"]
48 download_delay = 0.5
49
50 def start_requests(self):
51 url = 'https://www.ymca.net/find-your-y/?'
52
53 for point in SINGLE_POINT_STATES:
54 _, lat, lon, state = point.strip().split(',')
55 params = {"address": "{},{}".format(lat, lon)}
56 yield scrapy.Request(url=url + urlencode(params))
57
58 with open('./locations/searchable_points/us_centroids_100mile_radius_state.csv') as points:
59 next(points)
60 for point in points:
61 _, lat, lon, state = point.strip().split(',')
62 if state in HUNDRED_MILES_STATES:
63 params = {"address": "{},{}".format(lat, lon)}
64 yield scrapy.Request(url=url + urlencode(params))
65
66 with open('./locations/searchable_points/us_centroids_25mile_radius_state.csv') as points:
67 next(points)
68 for point in points:
69 _, lat, lon, state = point.strip().split(',')
70 if state in TWENTYFIVE_MILES_STATES:
71 params = {"address": "{},{}".format(lat, lon)}
72 yield scrapy.Request(url=url + urlencode(params))
73
74 with open('./locations/searchable_points/us_centroids_50mile_radius_state.csv') as points:
75 next(points)
76 for point in points:
77 _, lat, lon, state = point.strip().split(',')
78 if state not in HUNDRED_MILES_STATES.union(TWENTYFIVE_MILES_STATES).union({"AK", "HI"}):
79 params = {"address": "{},{}".format(lat, lon)}
80 yield scrapy.Request(url=url + urlencode(params))
81
82 for city in ADDITONAL_CITIES:
83 params = {"address": city}
84 yield scrapy.Request(url=url + urlencode(params))
85
86 def parse_hours(self, hours):
87 opening_hours = OpeningHours()
88
89 for hour in hours:
90 hour = hour.strip()
91 if hour == "Hours of Operation:":
92 continue
93
94 try:
95 day, open_time, close_time = re.search(r'(.*?):\s(.*?)\s-\s(.*?)$', hour).groups()
96 except AttributeError: # closed
97 continue
98 open_time = open_time.replace('.', '')
99 close_time = close_time.replace('.', '')
100
101 open_time = (datetime.strptime(open_time, '%I:%M %p')
102 if ":" in open_time
103 else datetime.strptime(open_time, '%I %p')).strftime('%H:%M')
104 close_time = (datetime.strptime(close_time, '%I:%M %p')
105 if ":" in close_time
106 else datetime.strptime(close_time, '%I %p')).strftime('%H:%M')
107
108 opening_hours.add_range(day=day[:2],
109 open_time=open_time,
110 close_time=close_time,
111 time_format='%H:%M')
112 return opening_hours.as_opening_hours()
113
114 def parse_location(self, response):
115 p = response.xpath('//main//p[1]/text()').extract()
116 p = [x.strip() for x in p if x.strip()]
117
118 phone = p.pop(-1) # last line is phone number
119 city, state, postcode = re.search(r'(.*?), ([A-Z]{2}) ([\d-]+)$', p.pop(-1)).groups() # next to last line is city/state/zip
120 address = " ".join(p) # every thing left is street address
121
122 properties = {
123 'ref': re.search(r'.+/?id=(.+)', response.url).group(1),
124 'name': response.xpath('//main//h1/text()').extract_first(),
125 'addr_full': address,
126 'city': city,
127 'state': state,
128 'postcode': postcode,
129 'country': 'US',
130 'lat': float(response.xpath('//div[@id="y-profile-position"]/@data-latitude').extract_first()),
131 'lon': float(response.xpath('//div[@id="y-profile-position"]/@data-longitude').extract_first()),
132 'phone': phone.replace("Phone: ", ""),
133 'website': response.xpath('//div[@id="y-profile-position"]/@data-url').extract_first()
134 }
135
136 properties['opening_hours'] = self.parse_hours(response.xpath('//main//p[contains(text(), "Hours")]/text()').extract())
137
138 yield GeojsonPointItem(**properties)
139
140 def parse(self, response):
141 urls = response.xpath('//main//ul[not(contains(@class, "ymca-pagination"))]/li/h3//a/@href').extract()
142
143 for url in urls:
144 yield scrapy.Request(response.urljoin(url), callback=self.parse_location)
145
[end of locations/spiders/ymca.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/ymca.py b/locations/spiders/ymca.py
--- a/locations/spiders/ymca.py
+++ b/locations/spiders/ymca.py
@@ -44,11 +44,11 @@
class YmcaSpider(scrapy.Spider):
name = "ymca"
item_attributes = { 'brand': "YMCA" }
- allowed_domains = ["ymca.net"]
+ allowed_domains = ["ymca.org"]
download_delay = 0.5
def start_requests(self):
- url = 'https://www.ymca.net/find-your-y/?'
+ url = 'https://www.ymca.org/find-your-y/?'
for point in SINGLE_POINT_STATES:
_, lat, lon, state = point.strip().split(',')
| {"golden_diff": "diff --git a/locations/spiders/ymca.py b/locations/spiders/ymca.py\n--- a/locations/spiders/ymca.py\n+++ b/locations/spiders/ymca.py\n@@ -44,11 +44,11 @@\n class YmcaSpider(scrapy.Spider):\n name = \"ymca\"\n item_attributes = { 'brand': \"YMCA\" }\n- allowed_domains = [\"ymca.net\"]\n+ allowed_domains = [\"ymca.org\"]\n download_delay = 0.5\n \n def start_requests(self):\n- url = 'https://www.ymca.net/find-your-y/?'\n+ url = 'https://www.ymca.org/find-your-y/?'\n \n for point in SINGLE_POINT_STATES:\n _, lat, lon, state = point.strip().split(',')\n", "issue": "Spider ymca is broken\nDuring the global build at 2021-06-02-14-42-40, spider **ymca** failed with **0 features** and **87 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/ymca.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/ymca.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/ymca.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom datetime import datetime\nimport json\nimport re\nfrom urllib.parse import urlencode\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nSINGLE_POINT_STATES = [\n (\"0,64.0685,-152.2782,AK\"),\n (\"1,20.6538883744,-157.8631750471,HI\"),\n]\n\nHUNDRED_MILES_STATES = {\"MT\", \"WY\", \"SD\", \"ND\", \"NE\", \"NV\", \"AZ\", \"NM\", \"UT\", \"ID\"}\nTWENTYFIVE_MILES_STATES = {\"MD\", \"OH\", \"FL\", \"IL\", \"IA\", \"WI\", \"MN\", \"RI\", \"MA\", \"NH\",\n \"SC\", \"NC\", \"NJ\", \"WA\", \"CA\", \"PA\", \"NY\"}\nADDITONAL_CITIES = [\n \"Los Angeles, CA\",\n \"New York, NY\",\n \"Boston, MA\",\n \"Philadelphia, PA\",\n \"Dallas, TX\",\n \"Houston, TX\",\n \"Seattle, WA\",\n \"San Francisco, CA\",\n \"Denver, CO\",\n \"Minneapolis, MN\",\n \"Omaha, NE\",\n \"St. Louis, MO\",\n \"Chicago, IL\",\n \"Montgomery, AL\",\n \"Orlando, FL\",\n \"St. 
Petersburg, FL\",\n \"Atlanta, GA\",\n \"Poughkeepsie, NY\",\n \"Hartford, CT\",\n \"Concord, NH\"\n]\n\n\nclass YmcaSpider(scrapy.Spider):\n name = \"ymca\"\n item_attributes = { 'brand': \"YMCA\" }\n allowed_domains = [\"ymca.net\"]\n download_delay = 0.5\n\n def start_requests(self):\n url = 'https://www.ymca.net/find-your-y/?'\n\n for point in SINGLE_POINT_STATES:\n _, lat, lon, state = point.strip().split(',')\n params = {\"address\": \"{},{}\".format(lat, lon)}\n yield scrapy.Request(url=url + urlencode(params))\n\n with open('./locations/searchable_points/us_centroids_100mile_radius_state.csv') as points:\n next(points)\n for point in points:\n _, lat, lon, state = point.strip().split(',')\n if state in HUNDRED_MILES_STATES:\n params = {\"address\": \"{},{}\".format(lat, lon)}\n yield scrapy.Request(url=url + urlencode(params))\n\n with open('./locations/searchable_points/us_centroids_25mile_radius_state.csv') as points:\n next(points)\n for point in points:\n _, lat, lon, state = point.strip().split(',')\n if state in TWENTYFIVE_MILES_STATES:\n params = {\"address\": \"{},{}\".format(lat, lon)}\n yield scrapy.Request(url=url + urlencode(params))\n\n with open('./locations/searchable_points/us_centroids_50mile_radius_state.csv') as points:\n next(points)\n for point in points:\n _, lat, lon, state = point.strip().split(',')\n if state not in HUNDRED_MILES_STATES.union(TWENTYFIVE_MILES_STATES).union({\"AK\", \"HI\"}):\n params = {\"address\": \"{},{}\".format(lat, lon)}\n yield scrapy.Request(url=url + urlencode(params))\n\n for city in ADDITONAL_CITIES:\n params = {\"address\": city}\n yield scrapy.Request(url=url + urlencode(params))\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n hour = hour.strip()\n if hour == \"Hours of Operation:\":\n continue\n\n try:\n day, open_time, close_time = re.search(r'(.*?):\\s(.*?)\\s-\\s(.*?)$', hour).groups()\n except AttributeError: # closed\n continue\n open_time = open_time.replace('.', '')\n close_time = close_time.replace('.', '')\n\n open_time = (datetime.strptime(open_time, '%I:%M %p')\n if \":\" in open_time\n else datetime.strptime(open_time, '%I %p')).strftime('%H:%M')\n close_time = (datetime.strptime(close_time, '%I:%M %p')\n if \":\" in close_time\n else datetime.strptime(close_time, '%I %p')).strftime('%H:%M')\n\n opening_hours.add_range(day=day[:2],\n open_time=open_time,\n close_time=close_time,\n time_format='%H:%M')\n return opening_hours.as_opening_hours()\n\n def parse_location(self, response):\n p = response.xpath('//main//p[1]/text()').extract()\n p = [x.strip() for x in p if x.strip()]\n\n phone = p.pop(-1) # last line is phone number\n city, state, postcode = re.search(r'(.*?), ([A-Z]{2}) ([\\d-]+)$', p.pop(-1)).groups() # next to last line is city/state/zip\n address = \" \".join(p) # every thing left is street address\n\n properties = {\n 'ref': re.search(r'.+/?id=(.+)', response.url).group(1),\n 'name': response.xpath('//main//h1/text()').extract_first(),\n 'addr_full': address,\n 'city': city,\n 'state': state,\n 'postcode': postcode,\n 'country': 'US',\n 'lat': float(response.xpath('//div[@id=\"y-profile-position\"]/@data-latitude').extract_first()),\n 'lon': float(response.xpath('//div[@id=\"y-profile-position\"]/@data-longitude').extract_first()),\n 'phone': phone.replace(\"Phone: \", \"\"),\n 'website': response.xpath('//div[@id=\"y-profile-position\"]/@data-url').extract_first()\n }\n \n properties['opening_hours'] = 
self.parse_hours(response.xpath('//main//p[contains(text(), \"Hours\")]/text()').extract())\n \n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n urls = response.xpath('//main//ul[not(contains(@class, \"ymca-pagination\"))]/li/h3//a/@href').extract()\n\n for url in urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_location)\n", "path": "locations/spiders/ymca.py"}]} | 2,433 | 179 |
gh_patches_debug_40667 | rasdani/github-patches | git_diff | nilearn__nilearn-1054 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
expandpath should be applied to the "memory" argument
The CacheMixin should be modified so that when the argument is a basestring, it is expanded, with the same logic to turn this off as the globbing that we already have.
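A minimal sketch of the intended behaviour, gated on the same module-level switch that already controls globbing (the helper name is hypothetical, and real code would also need to handle the Python 2 `basestring` compatibility type):

```python
import os

import nilearn


def _expand_cache_dir(memory):
    # Expand '~' in a string cache path, unless path expansion has been
    # globally disabled via nilearn.EXPAND_PATH_WILDCARDS.
    if isinstance(memory, str) and nilearn.EXPAND_PATH_WILDCARDS:
        return os.path.expanduser(memory)
    return memory
```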
</issue>
<code>
[start of nilearn/__init__.py]
1 """
2 Machine Learning module for NeuroImaging in python
3 ==================================================
4
5 Documentation is available in the docstrings and online at
6 http://nilearn.github.io.
7
8 Contents
9 --------
10 Nilearn aims at simplifying the use of the scikit-learn package in the context of
11 neuroimaging. It provides specific input/output functions, algorithms and
12 visualization tools.
13
14 Submodules
15 ---------
16 datasets --- Utilities to download NeuroImaging datasets
17 decoding --- Decoding tools and algorithms
18 decomposition --- Includes a subject level variant of the ICA
19 algorithm called Canonical ICA
20 connectome --- Set of tools for computing functional connectivity matrices
21 and for sparse multi-subjects learning of Gaussian graphical models
22 image --- Set of functions defining mathematical operations
23 working on Niimg-like objects
24 input_data --- includes scikit-learn tranformers and tools to
25 preprocess neuro-imaging data
26 masking --- Utilities to compute and operate on brain masks
27 mass_univariate --- Defines a Massively Univariate Linear Model
28 estimated with OLS and permutation test
29 plotting --- Plotting code for nilearn
30 region --- Set of functions for extracting region-defined
31 signals
32 signal --- Set of preprocessing functions for time series
33 """
34
35 import gzip
36
37 from .version import _check_module_dependencies, __version__
38
39 _check_module_dependencies()
40
41 # Monkey-patch gzip to have faster reads on large gzip files
42 if hasattr(gzip.GzipFile, 'max_read_chunk'):
43 gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb
44
45 # Boolean controlling the default globbing technique when using check_niimg
46 # Default value it True, set it to False to completely deactivate use of glob
47 # module
48 EXPAND_PATH_WILDCARDS = True
49
50 # Boolean controlling whether the joblib caches should be
51 # flushed if the version of certain modules changes (eg nibabel, as it
52 # does not respect the backward compatibility in some of its internal
53 # structures
54 # This is used in nilearn._utils.cache_mixin
55 CHECK_CACHE_VERSION = True
56
57 # list all submodules available in nilearn and version
58 __all__ = ['datasets', 'decoding', 'decomposition', 'connectome',
59 'image', 'input_data', 'masking', 'mass_univariate', 'plotting',
60 'region', 'signal', '__version__']
61
[end of nilearn/__init__.py]
[start of nilearn/_utils/cache_mixin.py]
1 """
2 Mixin for cache with joblib
3 """
4 # Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais
5 # License: simplified BSD
6
7 import json
8 import warnings
9 import os
10 import shutil
11 from distutils.version import LooseVersion
12
13 import nibabel
14 from sklearn.externals.joblib import Memory
15
16 MEMORY_CLASSES = (Memory, )
17
18 try:
19 from joblib import Memory as JoblibMemory
20 MEMORY_CLASSES = (Memory, JoblibMemory)
21 except ImportError:
22 pass
23
24 import nilearn
25
26 from .compat import _basestring
27
28 __CACHE_CHECKED = dict()
29
30
31 def _safe_cache(memory, func, **kwargs):
32 """ A wrapper for mem.cache that flushes the cache if the version
33 number of nibabel has changed.
34 """
35 cachedir = memory.cachedir
36
37 if cachedir is None or cachedir in __CACHE_CHECKED:
38 return memory.cache(func, **kwargs)
39
40 version_file = os.path.join(cachedir, 'module_versions.json')
41
42 versions = dict()
43 if os.path.exists(version_file):
44 with open(version_file, 'r') as _version_file:
45 versions = json.load(_version_file)
46
47 modules = (nibabel, )
48 # Keep only the major + minor version numbers
49 my_versions = dict((m.__name__, LooseVersion(m.__version__).version[:2])
50 for m in modules)
51 commons = set(versions.keys()).intersection(set(my_versions.keys()))
52 collisions = [m for m in commons if versions[m] != my_versions[m]]
53
54 # Flush cache if version collision
55 if len(collisions) > 0:
56 if nilearn.CHECK_CACHE_VERSION:
57 warnings.warn("Incompatible cache in %s: "
58 "different version of nibabel. Deleting "
59 "the cache. Put nilearn.CHECK_CACHE_VERSION "
60 "to false to avoid this behavior."
61 % cachedir)
62 try:
63 tmp_dir = (os.path.split(cachedir)[:-1]
64 + ('old_%i' % os.getpid(), ))
65 tmp_dir = os.path.join(*tmp_dir)
66 # We use rename + unlink to be more robust to race
67 # conditions
68 os.rename(cachedir, tmp_dir)
69 shutil.rmtree(tmp_dir)
70 except OSError:
71 # Another process could have removed this dir
72 pass
73
74 try:
75 os.makedirs(cachedir)
76 except OSError:
77 # File exists?
78 pass
79 else:
80 warnings.warn("Incompatible cache in %s: "
81 "old version of nibabel." % cachedir)
82
83 # Write json files if configuration is different
84 if versions != my_versions:
85 with open(version_file, 'w') as _version_file:
86 json.dump(my_versions, _version_file)
87
88 __CACHE_CHECKED[cachedir] = True
89
90 return memory.cache(func, **kwargs)
91
92
93 def cache(func, memory, func_memory_level=None, memory_level=None,
94 **kwargs):
95 """ Return a joblib.Memory object.
96
97 The memory_level determines the level above which the wrapped
98 function output is cached. By specifying a numeric value for
99 this level, the user can to control the amount of cache memory
100 used. This function will cache the function call or not
101 depending on the cache level.
102
103 Parameters
104 ----------
105 func: function
106 The function which output is to be cached.
107
108 memory: instance of joblib.Memory or string
109 Used to cache the function call.
110
111 func_memory_level: int, optional
112 The memory_level from which caching must be enabled for the wrapped
113 function.
114
115 memory_level: int, optional
116 The memory_level used to determine if function call must
117 be cached or not (if user_memory_level is equal of greater than
118 func_memory_level the function is cached)
119
120 kwargs: keyword arguments
121 The keyword arguments passed to memory.cache
122
123 Returns
124 -------
125 mem: joblib.MemorizedFunc
126 object that wraps the function func. This object may be
127 a no-op, if the requested level is lower than the value given
128 to _cache()). For consistency, a joblib.Memory object is always
129 returned.
130 """
131 verbose = kwargs.get('verbose', 0)
132
133 # memory_level and func_memory_level must be both None or both integers.
134 memory_levels = [memory_level, func_memory_level]
135 both_params_integers = all(isinstance(lvl, int) for lvl in memory_levels)
136 both_params_none = all(lvl is None for lvl in memory_levels)
137
138 if not (both_params_integers or both_params_none):
139 raise ValueError('Reference and user memory levels must be both None '
140 'or both integers.')
141
142 if memory is not None and (func_memory_level is None or
143 memory_level >= func_memory_level):
144 if isinstance(memory, _basestring):
145 memory = Memory(cachedir=memory, verbose=verbose)
146 if not isinstance(memory, MEMORY_CLASSES):
147 raise TypeError("'memory' argument must be a string or a "
148 "joblib.Memory object. "
149 "%s %s was given." % (memory, type(memory)))
150 if (memory.cachedir is None and memory_level is not None
151 and memory_level > 1):
152 warnings.warn("Caching has been enabled (memory_level = %d) "
153 "but no Memory object or path has been provided"
154 " (parameter memory). Caching deactivated for "
155 "function %s." %
156 (memory_level, func.__name__),
157 stacklevel=2)
158 else:
159 memory = Memory(cachedir=None, verbose=verbose)
160 return _safe_cache(memory, func, **kwargs)
161
162
163 class CacheMixin(object):
164 """Mixin to add caching to a class.
165
166 This class is a thin layer on top of joblib.Memory, that mainly adds a
167 "caching level", similar to a "log level".
168
169 Usage: to cache the results of a method, wrap it in self._cache()
170 defined by this class. Caching is performed only if the user-specified
171 cache level (self._memory_level) is greater than the value given as a
172 parameter to self._cache(). See _cache() documentation for details.
173 """
174 def _cache(self, func, func_memory_level=1, **kwargs):
175 """Return a joblib.Memory object.
176
177 The memory_level determines the level above which the wrapped
178 function output is cached. By specifying a numeric value for
179 this level, the user can to control the amount of cache memory
180 used. This function will cache the function call or not
181 depending on the cache level.
182
183 Parameters
184 ----------
185 func: function
186 The function the output of which is to be cached.
187
188 memory_level: int
189 The memory_level from which caching must be enabled for the wrapped
190 function.
191
192 Returns
193 -------
194 mem: joblib.Memory
195 object that wraps the function func. This object may be
196 a no-op, if the requested level is lower than the value given
197 to _cache()). For consistency, a joblib.Memory object is always
198 returned.
199
200 """
201
202 verbose = getattr(self, 'verbose', 0)
203
204 # Creates attributes if they don't exist
205 # This is to make creating them in __init__() optional.
206 if not hasattr(self, "memory_level"):
207 self.memory_level = 0
208 if not hasattr(self, "memory"):
209 self.memory = Memory(cachedir=None, verbose=verbose)
210 if isinstance(self.memory, _basestring):
211 self.memory = Memory(cachedir=self.memory, verbose=verbose)
212
213 # If cache level is 0 but a memory object has been provided, set
214 # memory_level to 1 with a warning.
215 if self.memory_level == 0:
216 if (isinstance(self.memory, _basestring)
217 or self.memory.cachedir is not None):
218 warnings.warn("memory_level is currently set to 0 but "
219 "a Memory object has been provided. "
220 "Setting memory_level to 1.")
221 self.memory_level = 1
222
223 return cache(func, self.memory, func_memory_level=func_memory_level,
224 memory_level=self.memory_level, **kwargs)
225
[end of nilearn/_utils/cache_mixin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nilearn/__init__.py b/nilearn/__init__.py
--- a/nilearn/__init__.py
+++ b/nilearn/__init__.py
@@ -43,8 +43,9 @@
gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb
# Boolean controlling the default globbing technique when using check_niimg
-# Default value it True, set it to False to completely deactivate use of glob
-# module
+# and the os.path.expanduser usage in CacheMixin.
+# Default value it True, set it to False to completely deactivate this
+# behavior.
EXPAND_PATH_WILDCARDS = True
# Boolean controlling whether the joblib caches should be
diff --git a/nilearn/_utils/cache_mixin.py b/nilearn/_utils/cache_mixin.py
--- a/nilearn/_utils/cache_mixin.py
+++ b/nilearn/_utils/cache_mixin.py
@@ -208,17 +208,47 @@
if not hasattr(self, "memory"):
self.memory = Memory(cachedir=None, verbose=verbose)
if isinstance(self.memory, _basestring):
- self.memory = Memory(cachedir=self.memory, verbose=verbose)
+ cache_dir = self.memory
+ if nilearn.EXPAND_PATH_WILDCARDS:
+ cache_dir = os.path.expanduser(cache_dir)
+
+ # Perform some verifications on given path.
+ split_cache_dir = os.path.split(cache_dir)
+ if (len(split_cache_dir) > 1 and
+ (not os.path.exists(split_cache_dir[0]) and
+ split_cache_dir[0] != '')):
+ if (not nilearn.EXPAND_PATH_WILDCARDS and
+ cache_dir.startswith("~")):
+ # Maybe the user want to enable expanded user path.
+ error_msg = ("Given cache path parent directory doesn't "
+ "exists, you gave '{0}'. Enabling "
+ "nilearn.EXPAND_PATH_WILDCARDS could solve "
+ "this issue.".format(split_cache_dir[0]))
+ elif self.memory.startswith("~"):
+ # Path built on top of expanded user path doesn't exist.
+ error_msg = ("Given cache path parent directory doesn't "
+ "exists, you gave '{0}' which was expanded "
+ "as '{1}' but doesn't exist either. Use "
+ "nilearn.EXPAND_PATH_WILDCARDS to deactivate "
+ "auto expand user path (~) behavior."
+ .format(split_cache_dir[0],
+ os.path.dirname(self.memory)))
+ else:
+ # The given cache base path doesn't exist.
+ error_msg = ("Given cache path parent directory doesn't "
+ "exists, you gave '{0}'."
+ .format(split_cache_dir[0]))
+ raise ValueError(error_msg)
+
+ self.memory = Memory(cachedir=cache_dir, verbose=verbose)
# If cache level is 0 but a memory object has been provided, set
# memory_level to 1 with a warning.
- if self.memory_level == 0:
- if (isinstance(self.memory, _basestring)
- or self.memory.cachedir is not None):
- warnings.warn("memory_level is currently set to 0 but "
- "a Memory object has been provided. "
- "Setting memory_level to 1.")
- self.memory_level = 1
+ if self.memory_level == 0 and self.memory.cachedir is not None:
+ warnings.warn("memory_level is currently set to 0 but "
+ "a Memory object has been provided. "
+ "Setting memory_level to 1.")
+ self.memory_level = 1
return cache(func, self.memory, func_memory_level=func_memory_level,
memory_level=self.memory_level, **kwargs)
| {"golden_diff": "diff --git a/nilearn/__init__.py b/nilearn/__init__.py\n--- a/nilearn/__init__.py\n+++ b/nilearn/__init__.py\n@@ -43,8 +43,9 @@\n gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb\n \n # Boolean controlling the default globbing technique when using check_niimg\n-# Default value it True, set it to False to completely deactivate use of glob\n-# module\n+# and the os.path.expanduser usage in CacheMixin.\n+# Default value it True, set it to False to completely deactivate this\n+# behavior.\n EXPAND_PATH_WILDCARDS = True\n \n # Boolean controlling whether the joblib caches should be\ndiff --git a/nilearn/_utils/cache_mixin.py b/nilearn/_utils/cache_mixin.py\n--- a/nilearn/_utils/cache_mixin.py\n+++ b/nilearn/_utils/cache_mixin.py\n@@ -208,17 +208,47 @@\n if not hasattr(self, \"memory\"):\n self.memory = Memory(cachedir=None, verbose=verbose)\n if isinstance(self.memory, _basestring):\n- self.memory = Memory(cachedir=self.memory, verbose=verbose)\n+ cache_dir = self.memory\n+ if nilearn.EXPAND_PATH_WILDCARDS:\n+ cache_dir = os.path.expanduser(cache_dir)\n+\n+ # Perform some verifications on given path.\n+ split_cache_dir = os.path.split(cache_dir)\n+ if (len(split_cache_dir) > 1 and\n+ (not os.path.exists(split_cache_dir[0]) and\n+ split_cache_dir[0] != '')):\n+ if (not nilearn.EXPAND_PATH_WILDCARDS and\n+ cache_dir.startswith(\"~\")):\n+ # Maybe the user want to enable expanded user path.\n+ error_msg = (\"Given cache path parent directory doesn't \"\n+ \"exists, you gave '{0}'. Enabling \"\n+ \"nilearn.EXPAND_PATH_WILDCARDS could solve \"\n+ \"this issue.\".format(split_cache_dir[0]))\n+ elif self.memory.startswith(\"~\"):\n+ # Path built on top of expanded user path doesn't exist.\n+ error_msg = (\"Given cache path parent directory doesn't \"\n+ \"exists, you gave '{0}' which was expanded \"\n+ \"as '{1}' but doesn't exist either. Use \"\n+ \"nilearn.EXPAND_PATH_WILDCARDS to deactivate \"\n+ \"auto expand user path (~) behavior.\"\n+ .format(split_cache_dir[0],\n+ os.path.dirname(self.memory)))\n+ else:\n+ # The given cache base path doesn't exist.\n+ error_msg = (\"Given cache path parent directory doesn't \"\n+ \"exists, you gave '{0}'.\"\n+ .format(split_cache_dir[0]))\n+ raise ValueError(error_msg)\n+\n+ self.memory = Memory(cachedir=cache_dir, verbose=verbose)\n \n # If cache level is 0 but a memory object has been provided, set\n # memory_level to 1 with a warning.\n- if self.memory_level == 0:\n- if (isinstance(self.memory, _basestring)\n- or self.memory.cachedir is not None):\n- warnings.warn(\"memory_level is currently set to 0 but \"\n- \"a Memory object has been provided. \"\n- \"Setting memory_level to 1.\")\n- self.memory_level = 1\n+ if self.memory_level == 0 and self.memory.cachedir is not None:\n+ warnings.warn(\"memory_level is currently set to 0 but \"\n+ \"a Memory object has been provided. 
\"\n+ \"Setting memory_level to 1.\")\n+ self.memory_level = 1\n \n return cache(func, self.memory, func_memory_level=func_memory_level,\n memory_level=self.memory_level, **kwargs)\n", "issue": "expandpath should be applied to the \"memory\" argument\nThe CacheMixin should be modified so that when the argument is a basestring, it is expanded, with the same logic to turn this off as the globbing that we already have.\n\n", "before_files": [{"content": "\"\"\"\nMachine Learning module for NeuroImaging in python\n==================================================\n\nDocumentation is available in the docstrings and online at\nhttp://nilearn.github.io.\n\nContents\n--------\nNilearn aims at simplifying the use of the scikit-learn package in the context of\nneuroimaging. It provides specific input/output functions, algorithms and\nvisualization tools.\n\nSubmodules\n---------\ndatasets --- Utilities to download NeuroImaging datasets\ndecoding --- Decoding tools and algorithms\ndecomposition --- Includes a subject level variant of the ICA\n algorithm called Canonical ICA\nconnectome --- Set of tools for computing functional connectivity matrices\n and for sparse multi-subjects learning of Gaussian graphical models\nimage --- Set of functions defining mathematical operations\n working on Niimg-like objects\ninput_data --- includes scikit-learn tranformers and tools to\n preprocess neuro-imaging data\nmasking --- Utilities to compute and operate on brain masks\nmass_univariate --- Defines a Massively Univariate Linear Model\n estimated with OLS and permutation test\nplotting --- Plotting code for nilearn\nregion --- Set of functions for extracting region-defined\n signals\nsignal --- Set of preprocessing functions for time series\n\"\"\"\n\nimport gzip\n\nfrom .version import _check_module_dependencies, __version__\n\n_check_module_dependencies()\n\n# Monkey-patch gzip to have faster reads on large gzip files\nif hasattr(gzip.GzipFile, 'max_read_chunk'):\n gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb\n\n# Boolean controlling the default globbing technique when using check_niimg\n# Default value it True, set it to False to completely deactivate use of glob\n# module\nEXPAND_PATH_WILDCARDS = True\n\n# Boolean controlling whether the joblib caches should be\n# flushed if the version of certain modules changes (eg nibabel, as it\n# does not respect the backward compatibility in some of its internal\n# structures\n# This is used in nilearn._utils.cache_mixin\nCHECK_CACHE_VERSION = True\n\n# list all submodules available in nilearn and version\n__all__ = ['datasets', 'decoding', 'decomposition', 'connectome',\n 'image', 'input_data', 'masking', 'mass_univariate', 'plotting',\n 'region', 'signal', '__version__']\n", "path": "nilearn/__init__.py"}, {"content": "\"\"\"\nMixin for cache with joblib\n\"\"\"\n# Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais\n# License: simplified BSD\n\nimport json\nimport warnings\nimport os\nimport shutil\nfrom distutils.version import LooseVersion\n\nimport nibabel\nfrom sklearn.externals.joblib import Memory\n\nMEMORY_CLASSES = (Memory, )\n\ntry:\n from joblib import Memory as JoblibMemory\n MEMORY_CLASSES = (Memory, JoblibMemory)\nexcept ImportError:\n pass\n\nimport nilearn\n\nfrom .compat import _basestring\n\n__CACHE_CHECKED = dict()\n\n\ndef _safe_cache(memory, func, **kwargs):\n \"\"\" A wrapper for mem.cache that flushes the cache if the version\n number of nibabel has changed.\n \"\"\"\n cachedir = memory.cachedir\n\n if cachedir is None or 
cachedir in __CACHE_CHECKED:\n return memory.cache(func, **kwargs)\n\n version_file = os.path.join(cachedir, 'module_versions.json')\n\n versions = dict()\n if os.path.exists(version_file):\n with open(version_file, 'r') as _version_file:\n versions = json.load(_version_file)\n\n modules = (nibabel, )\n # Keep only the major + minor version numbers\n my_versions = dict((m.__name__, LooseVersion(m.__version__).version[:2])\n for m in modules)\n commons = set(versions.keys()).intersection(set(my_versions.keys()))\n collisions = [m for m in commons if versions[m] != my_versions[m]]\n\n # Flush cache if version collision\n if len(collisions) > 0:\n if nilearn.CHECK_CACHE_VERSION:\n warnings.warn(\"Incompatible cache in %s: \"\n \"different version of nibabel. Deleting \"\n \"the cache. Put nilearn.CHECK_CACHE_VERSION \"\n \"to false to avoid this behavior.\"\n % cachedir)\n try:\n tmp_dir = (os.path.split(cachedir)[:-1]\n + ('old_%i' % os.getpid(), ))\n tmp_dir = os.path.join(*tmp_dir)\n # We use rename + unlink to be more robust to race\n # conditions\n os.rename(cachedir, tmp_dir)\n shutil.rmtree(tmp_dir)\n except OSError:\n # Another process could have removed this dir\n pass\n\n try:\n os.makedirs(cachedir)\n except OSError:\n # File exists?\n pass\n else:\n warnings.warn(\"Incompatible cache in %s: \"\n \"old version of nibabel.\" % cachedir)\n\n # Write json files if configuration is different\n if versions != my_versions:\n with open(version_file, 'w') as _version_file:\n json.dump(my_versions, _version_file)\n\n __CACHE_CHECKED[cachedir] = True\n\n return memory.cache(func, **kwargs)\n\n\ndef cache(func, memory, func_memory_level=None, memory_level=None,\n **kwargs):\n \"\"\" Return a joblib.Memory object.\n\n The memory_level determines the level above which the wrapped\n function output is cached. By specifying a numeric value for\n this level, the user can to control the amount of cache memory\n used. This function will cache the function call or not\n depending on the cache level.\n\n Parameters\n ----------\n func: function\n The function which output is to be cached.\n\n memory: instance of joblib.Memory or string\n Used to cache the function call.\n\n func_memory_level: int, optional\n The memory_level from which caching must be enabled for the wrapped\n function.\n\n memory_level: int, optional\n The memory_level used to determine if function call must\n be cached or not (if user_memory_level is equal of greater than\n func_memory_level the function is cached)\n\n kwargs: keyword arguments\n The keyword arguments passed to memory.cache\n\n Returns\n -------\n mem: joblib.MemorizedFunc\n object that wraps the function func. This object may be\n a no-op, if the requested level is lower than the value given\n to _cache()). 
For consistency, a joblib.Memory object is always\n returned.\n \"\"\"\n verbose = kwargs.get('verbose', 0)\n\n # memory_level and func_memory_level must be both None or both integers.\n memory_levels = [memory_level, func_memory_level]\n both_params_integers = all(isinstance(lvl, int) for lvl in memory_levels)\n both_params_none = all(lvl is None for lvl in memory_levels)\n\n if not (both_params_integers or both_params_none):\n raise ValueError('Reference and user memory levels must be both None '\n 'or both integers.')\n\n if memory is not None and (func_memory_level is None or\n memory_level >= func_memory_level):\n if isinstance(memory, _basestring):\n memory = Memory(cachedir=memory, verbose=verbose)\n if not isinstance(memory, MEMORY_CLASSES):\n raise TypeError(\"'memory' argument must be a string or a \"\n \"joblib.Memory object. \"\n \"%s %s was given.\" % (memory, type(memory)))\n if (memory.cachedir is None and memory_level is not None\n and memory_level > 1):\n warnings.warn(\"Caching has been enabled (memory_level = %d) \"\n \"but no Memory object or path has been provided\"\n \" (parameter memory). Caching deactivated for \"\n \"function %s.\" %\n (memory_level, func.__name__),\n stacklevel=2)\n else:\n memory = Memory(cachedir=None, verbose=verbose)\n return _safe_cache(memory, func, **kwargs)\n\n\nclass CacheMixin(object):\n \"\"\"Mixin to add caching to a class.\n\n This class is a thin layer on top of joblib.Memory, that mainly adds a\n \"caching level\", similar to a \"log level\".\n\n Usage: to cache the results of a method, wrap it in self._cache()\n defined by this class. Caching is performed only if the user-specified\n cache level (self._memory_level) is greater than the value given as a\n parameter to self._cache(). See _cache() documentation for details.\n \"\"\"\n def _cache(self, func, func_memory_level=1, **kwargs):\n \"\"\"Return a joblib.Memory object.\n\n The memory_level determines the level above which the wrapped\n function output is cached. By specifying a numeric value for\n this level, the user can to control the amount of cache memory\n used. This function will cache the function call or not\n depending on the cache level.\n\n Parameters\n ----------\n func: function\n The function the output of which is to be cached.\n\n memory_level: int\n The memory_level from which caching must be enabled for the wrapped\n function.\n\n Returns\n -------\n mem: joblib.Memory\n object that wraps the function func. This object may be\n a no-op, if the requested level is lower than the value given\n to _cache()). For consistency, a joblib.Memory object is always\n returned.\n\n \"\"\"\n\n verbose = getattr(self, 'verbose', 0)\n\n # Creates attributes if they don't exist\n # This is to make creating them in __init__() optional.\n if not hasattr(self, \"memory_level\"):\n self.memory_level = 0\n if not hasattr(self, \"memory\"):\n self.memory = Memory(cachedir=None, verbose=verbose)\n if isinstance(self.memory, _basestring):\n self.memory = Memory(cachedir=self.memory, verbose=verbose)\n\n # If cache level is 0 but a memory object has been provided, set\n # memory_level to 1 with a warning.\n if self.memory_level == 0:\n if (isinstance(self.memory, _basestring)\n or self.memory.cachedir is not None):\n warnings.warn(\"memory_level is currently set to 0 but \"\n \"a Memory object has been provided. 
\"\n \"Setting memory_level to 1.\")\n self.memory_level = 1\n\n return cache(func, self.memory, func_memory_level=func_memory_level,\n memory_level=self.memory_level, **kwargs)\n", "path": "nilearn/_utils/cache_mixin.py"}]} | 3,595 | 865 |
gh_patches_debug_10001 | rasdani/github-patches | git_diff | apache__airflow-31477 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cleanup-pod CLI command fails due to incorrect host
### Apache Airflow version
2.6.1
### What happened
When running `airflow kubernetes cleanup-pods`, the API call to delete a pod fails. A snippet of the log is below:
```
urllib3.exceptions.MaxRetryError:
HTTPConnectionPool(host='localhost', port=80): Max retries exceeded with url: /api/v1/namespaces/airflow/pods/my-task-avd79fq1 (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x7f52f9aebfd0>: Failed to establish a new connection: [Errno 111] Connection refused'))
```
[The Kubernetes client provisioned in _delete_pod](https://github.com/apache/airflow/blob/main/airflow/cli/commands/kubernetes_command.py#L151) incorrectly has the host as `http://localhost`. On the scheduler pod, if I start a Python environment, I can see that the configuration differs from the `get_kube_client()` configuration:
```
>>> get_kube_client().api_client.configuration.host
'https://172.20.0.1:443'
>>> client.CoreV1Api().api_client.configuration.host
'http://localhost/'
```
On Airflow 2.5.3 these two clients have the same configuration.
It's possible I have some mistake in my configuration but I'm not sure what it could be. The above fails on 2.6.0 also.
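
For context on the mechanism: the Python Kubernetes client only points at the cluster once a configuration has been loaded into its default `Configuration`, and a `client.CoreV1Api()` built before that step falls back to the library default of `http://localhost`. A minimal sketch of the distinction, assuming the standard `kubernetes` package:

```python
from kubernetes import client, config

# A bare CoreV1Api() inherits the library's default Configuration, whose host
# stays at http://localhost until a kube config has been loaded into it.
config.load_incluster_config()   # inside a pod; use config.load_kube_config() elsewhere
v1 = client.CoreV1Api()          # now points at the real API server
print(v1.api_client.configuration.host)
```

Airflow's `get_kube_client()` performs that loading step itself, which is presumably why the two clients above report different hosts.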
### What you think should happen instead
Pods should clean up without error
### How to reproduce
Run the following from a Kubernetes deployment of Airflow:
```python
from airflow.kubernetes.kube_client import get_kube_client
from kubernetes import client
print(get_kube_client().api_client.configuration.host)
print(client.CoreV1Api().api_client.configuration.host)
```
Alternatively run `airflow kubernetes cleanup-pods` with pods available for cleanup
### Operating System
Debian GNU/Linux 11 (bullseye)
### Versions of Apache Airflow Providers
_No response_
### Deployment
Official Apache Airflow Helm Chart
### Deployment details
Using `in_cluster` configuration for KubernetesExecutor
### Anything else
_No response_
### Are you willing to submit PR?
- [X] Yes I am willing to submit a PR!
### Code of Conduct
- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)
</issue>
<code>
[start of airflow/cli/commands/kubernetes_command.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 """Kubernetes sub-commands."""
18 from __future__ import annotations
19
20 import os
21 import sys
22 from datetime import datetime, timedelta
23
24 from kubernetes import client
25 from kubernetes.client.api_client import ApiClient
26 from kubernetes.client.rest import ApiException
27
28 from airflow.executors.kubernetes_executor import KubeConfig, create_pod_id
29 from airflow.kubernetes import pod_generator
30 from airflow.kubernetes.kube_client import get_kube_client
31 from airflow.kubernetes.pod_generator import PodGenerator
32 from airflow.models import DagRun, TaskInstance
33 from airflow.utils import cli as cli_utils, yaml
34 from airflow.utils.cli import get_dag
35
36
37 @cli_utils.action_cli
38 def generate_pod_yaml(args):
39 """Generates yaml files for each task in the DAG. Used for testing output of KubernetesExecutor."""
40 execution_date = args.execution_date
41 dag = get_dag(subdir=args.subdir, dag_id=args.dag_id)
42 yaml_output_path = args.output_path
43 dr = DagRun(dag.dag_id, execution_date=execution_date)
44 kube_config = KubeConfig()
45 for task in dag.tasks:
46 ti = TaskInstance(task, None)
47 ti.dag_run = dr
48 pod = PodGenerator.construct_pod(
49 dag_id=args.dag_id,
50 task_id=ti.task_id,
51 pod_id=create_pod_id(args.dag_id, ti.task_id),
52 try_number=ti.try_number,
53 kube_image=kube_config.kube_image,
54 date=ti.execution_date,
55 args=ti.command_as_list(),
56 pod_override_object=PodGenerator.from_obj(ti.executor_config),
57 scheduler_job_id="worker-config",
58 namespace=kube_config.executor_namespace,
59 base_worker_pod=PodGenerator.deserialize_model_file(kube_config.pod_template_file),
60 with_mutation_hook=True,
61 )
62 api_client = ApiClient()
63 date_string = pod_generator.datetime_to_label_safe_datestring(execution_date)
64 yaml_file_name = f"{args.dag_id}_{ti.task_id}_{date_string}.yml"
65 os.makedirs(os.path.dirname(yaml_output_path + "/airflow_yaml_output/"), exist_ok=True)
66 with open(yaml_output_path + "/airflow_yaml_output/" + yaml_file_name, "w") as output:
67 sanitized_pod = api_client.sanitize_for_serialization(pod)
68 output.write(yaml.dump(sanitized_pod))
69 print(f"YAML output can be found at {yaml_output_path}/airflow_yaml_output/")
70
71
72 @cli_utils.action_cli
73 def cleanup_pods(args):
74 """Clean up k8s pods in evicted/failed/succeeded/pending states."""
75 namespace = args.namespace
76
77 min_pending_minutes = args.min_pending_minutes
78 # protect newly created pods from deletion
79 if min_pending_minutes < 5:
80 min_pending_minutes = 5
81
82 # https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/
83 # All Containers in the Pod have terminated in success, and will not be restarted.
84 pod_succeeded = "succeeded"
85
86 # The Pod has been accepted by the Kubernetes cluster,
87 # but one or more of the containers has not been set up and made ready to run.
88 pod_pending = "pending"
89
90 # All Containers in the Pod have terminated, and at least one Container has terminated in failure.
91 # That is, the Container either exited with non-zero status or was terminated by the system.
92 pod_failed = "failed"
93
94 # https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/
95 pod_reason_evicted = "evicted"
96 # If pod is failed and restartPolicy is:
97 # * Always: Restart Container; Pod phase stays Running.
98 # * OnFailure: Restart Container; Pod phase stays Running.
99 # * Never: Pod phase becomes Failed.
100 pod_restart_policy_never = "never"
101
102 print("Loading Kubernetes configuration")
103 kube_client = get_kube_client()
104 print(f"Listing pods in namespace {namespace}")
105 airflow_pod_labels = [
106 "dag_id",
107 "task_id",
108 "try_number",
109 "airflow_version",
110 ]
111 list_kwargs = {"namespace": namespace, "limit": 500, "label_selector": ",".join(airflow_pod_labels)}
112
113 while True:
114 pod_list = kube_client.list_namespaced_pod(**list_kwargs)
115 for pod in pod_list.items:
116 pod_name = pod.metadata.name
117 print(f"Inspecting pod {pod_name}")
118 pod_phase = pod.status.phase.lower()
119 pod_reason = pod.status.reason.lower() if pod.status.reason else ""
120 pod_restart_policy = pod.spec.restart_policy.lower()
121 current_time = datetime.now(pod.metadata.creation_timestamp.tzinfo)
122
123 if (
124 pod_phase == pod_succeeded
125 or (pod_phase == pod_failed and pod_restart_policy == pod_restart_policy_never)
126 or (pod_reason == pod_reason_evicted)
127 or (
128 pod_phase == pod_pending
129 and current_time - pod.metadata.creation_timestamp
130 > timedelta(minutes=min_pending_minutes)
131 )
132 ):
133 print(
134 f'Deleting pod "{pod_name}" phase "{pod_phase}" and reason "{pod_reason}", '
135 f'restart policy "{pod_restart_policy}"'
136 )
137 try:
138 _delete_pod(pod.metadata.name, namespace)
139 except ApiException as e:
140 print(f"Can't remove POD: {e}", file=sys.stderr)
141 continue
142 print(f"No action taken on pod {pod_name}")
143 continue_token = pod_list.metadata._continue
144 if not continue_token:
145 break
146 list_kwargs["_continue"] = continue_token
147
148
149 def _delete_pod(name, namespace):
150 """Helper Function for cleanup_pods."""
151 core_v1 = client.CoreV1Api()
152 delete_options = client.V1DeleteOptions()
153 print(f'Deleting POD "{name}" from "{namespace}" namespace')
154 api_response = core_v1.delete_namespaced_pod(name=name, namespace=namespace, body=delete_options)
155 print(api_response)
156
[end of airflow/cli/commands/kubernetes_command.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/cli/commands/kubernetes_command.py b/airflow/cli/commands/kubernetes_command.py
--- a/airflow/cli/commands/kubernetes_command.py
+++ b/airflow/cli/commands/kubernetes_command.py
@@ -148,8 +148,8 @@
def _delete_pod(name, namespace):
"""Helper Function for cleanup_pods."""
- core_v1 = client.CoreV1Api()
+ kube_client = get_kube_client()
delete_options = client.V1DeleteOptions()
print(f'Deleting POD "{name}" from "{namespace}" namespace')
- api_response = core_v1.delete_namespaced_pod(name=name, namespace=namespace, body=delete_options)
+ api_response = kube_client.delete_namespaced_pod(name=name, namespace=namespace, body=delete_options)
print(api_response)
| {"golden_diff": "diff --git a/airflow/cli/commands/kubernetes_command.py b/airflow/cli/commands/kubernetes_command.py\n--- a/airflow/cli/commands/kubernetes_command.py\n+++ b/airflow/cli/commands/kubernetes_command.py\n@@ -148,8 +148,8 @@\n \n def _delete_pod(name, namespace):\n \"\"\"Helper Function for cleanup_pods.\"\"\"\n- core_v1 = client.CoreV1Api()\n+ kube_client = get_kube_client()\n delete_options = client.V1DeleteOptions()\n print(f'Deleting POD \"{name}\" from \"{namespace}\" namespace')\n- api_response = core_v1.delete_namespaced_pod(name=name, namespace=namespace, body=delete_options)\n+ api_response = kube_client.delete_namespaced_pod(name=name, namespace=namespace, body=delete_options)\n print(api_response)\n", "issue": "cleanup-pod CLI command fails due to incorrect host\n### Apache Airflow version\r\n\r\n2.6.1\r\n\r\n### What happened\r\n\r\nWhen running `airflow kubernetes cleanup-pods`, the API call to delete a pod fails. A snippet of the log is below:\r\n\r\n```\r\nurllib3.exceptions.MaxRetryError:\r\nHTTPConnectionPool(host='localhost', port=80): Max retries exceeded with url: /api/v1/namespaces/airflow/pods/my-task-avd79fq1 (Caused by NewConnectionError('<urllib3.connection.HTTPConnection object at 0x7f52f9aebfd0>: Failed to establish a new connection: [Errno 111] Connection refused'))\r\n```\r\n\r\n[The Kubernetes client provisioned in _delete_pod](https://github.com/apache/airflow/blob/main/airflow/cli/commands/kubernetes_command.py#L151) incorrectly has the host as `http:localhost`. On the scheduler pod if I start a Python environment I can see that the configuration differs from the `get_kube_client()` configuration:\r\n\r\n```\r\n>>> get_kube_client().api_client.configuration.host\r\n'https://172.20.0.1:443'\r\n>>> client.CoreV1Api().api_client.configuration.host\r\n'http://localhost/'\r\n```\r\n\r\nOn Airflow 2.5.3 these two clients have the same configuration.\r\n\r\nIt's possible I have some mistake in my configuration but I'm not sure what it could be. The above fails on 2.6.0 also.\r\n\r\n### What you think should happen instead\r\n\r\nPods should clean up without error\r\n\r\n### How to reproduce\r\n\r\nRun the following from a Kubernetes deployment of Airflow:\r\n\r\n```python\r\nfrom airflow.kubernetes.kube_client import get_kube_client\r\nfrom kubernetes import client\r\n\r\nprint(get_kube_client().api_client.configuration.host)\r\nprint(client.CoreV1Api().api_client.configuration.host)\r\n```\r\n\r\nAlternatively run `airflow kubernetes cleanup-pods` with pods available for cleanup\r\n\r\n### Operating System\r\n\r\nDebian GNU/Linux 11 (bullseye)\r\n\r\n### Versions of Apache Airflow Providers\r\n\r\n_No response_\r\n\r\n### Deployment\r\n\r\nOfficial Apache Airflow Helm Chart\r\n\r\n### Deployment details\r\n\r\nUsing `in_cluster` configuration for KubernetesExecutor\r\n\r\n### Anything else\r\n\r\n_No response_\r\n\r\n### Are you willing to submit PR?\r\n\r\n- [X] Yes I am willing to submit a PR!\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. 
The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Kubernetes sub-commands.\"\"\"\nfrom __future__ import annotations\n\nimport os\nimport sys\nfrom datetime import datetime, timedelta\n\nfrom kubernetes import client\nfrom kubernetes.client.api_client import ApiClient\nfrom kubernetes.client.rest import ApiException\n\nfrom airflow.executors.kubernetes_executor import KubeConfig, create_pod_id\nfrom airflow.kubernetes import pod_generator\nfrom airflow.kubernetes.kube_client import get_kube_client\nfrom airflow.kubernetes.pod_generator import PodGenerator\nfrom airflow.models import DagRun, TaskInstance\nfrom airflow.utils import cli as cli_utils, yaml\nfrom airflow.utils.cli import get_dag\n\n\n@cli_utils.action_cli\ndef generate_pod_yaml(args):\n \"\"\"Generates yaml files for each task in the DAG. Used for testing output of KubernetesExecutor.\"\"\"\n execution_date = args.execution_date\n dag = get_dag(subdir=args.subdir, dag_id=args.dag_id)\n yaml_output_path = args.output_path\n dr = DagRun(dag.dag_id, execution_date=execution_date)\n kube_config = KubeConfig()\n for task in dag.tasks:\n ti = TaskInstance(task, None)\n ti.dag_run = dr\n pod = PodGenerator.construct_pod(\n dag_id=args.dag_id,\n task_id=ti.task_id,\n pod_id=create_pod_id(args.dag_id, ti.task_id),\n try_number=ti.try_number,\n kube_image=kube_config.kube_image,\n date=ti.execution_date,\n args=ti.command_as_list(),\n pod_override_object=PodGenerator.from_obj(ti.executor_config),\n scheduler_job_id=\"worker-config\",\n namespace=kube_config.executor_namespace,\n base_worker_pod=PodGenerator.deserialize_model_file(kube_config.pod_template_file),\n with_mutation_hook=True,\n )\n api_client = ApiClient()\n date_string = pod_generator.datetime_to_label_safe_datestring(execution_date)\n yaml_file_name = f\"{args.dag_id}_{ti.task_id}_{date_string}.yml\"\n os.makedirs(os.path.dirname(yaml_output_path + \"/airflow_yaml_output/\"), exist_ok=True)\n with open(yaml_output_path + \"/airflow_yaml_output/\" + yaml_file_name, \"w\") as output:\n sanitized_pod = api_client.sanitize_for_serialization(pod)\n output.write(yaml.dump(sanitized_pod))\n print(f\"YAML output can be found at {yaml_output_path}/airflow_yaml_output/\")\n\n\n@cli_utils.action_cli\ndef cleanup_pods(args):\n \"\"\"Clean up k8s pods in evicted/failed/succeeded/pending states.\"\"\"\n namespace = args.namespace\n\n min_pending_minutes = args.min_pending_minutes\n # protect newly created pods from deletion\n if min_pending_minutes < 5:\n min_pending_minutes = 5\n\n # https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/\n # All Containers in the Pod have terminated in success, and will not be restarted.\n pod_succeeded = \"succeeded\"\n\n # The Pod has been accepted by the Kubernetes cluster,\n # but one or more of the containers has not been set up and made ready to run.\n pod_pending = \"pending\"\n\n # All Containers in the Pod have terminated, and at least one Container has terminated in failure.\n # That is, the Container 
either exited with non-zero status or was terminated by the system.\n pod_failed = \"failed\"\n\n # https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/\n pod_reason_evicted = \"evicted\"\n # If pod is failed and restartPolicy is:\n # * Always: Restart Container; Pod phase stays Running.\n # * OnFailure: Restart Container; Pod phase stays Running.\n # * Never: Pod phase becomes Failed.\n pod_restart_policy_never = \"never\"\n\n print(\"Loading Kubernetes configuration\")\n kube_client = get_kube_client()\n print(f\"Listing pods in namespace {namespace}\")\n airflow_pod_labels = [\n \"dag_id\",\n \"task_id\",\n \"try_number\",\n \"airflow_version\",\n ]\n list_kwargs = {\"namespace\": namespace, \"limit\": 500, \"label_selector\": \",\".join(airflow_pod_labels)}\n\n while True:\n pod_list = kube_client.list_namespaced_pod(**list_kwargs)\n for pod in pod_list.items:\n pod_name = pod.metadata.name\n print(f\"Inspecting pod {pod_name}\")\n pod_phase = pod.status.phase.lower()\n pod_reason = pod.status.reason.lower() if pod.status.reason else \"\"\n pod_restart_policy = pod.spec.restart_policy.lower()\n current_time = datetime.now(pod.metadata.creation_timestamp.tzinfo)\n\n if (\n pod_phase == pod_succeeded\n or (pod_phase == pod_failed and pod_restart_policy == pod_restart_policy_never)\n or (pod_reason == pod_reason_evicted)\n or (\n pod_phase == pod_pending\n and current_time - pod.metadata.creation_timestamp\n > timedelta(minutes=min_pending_minutes)\n )\n ):\n print(\n f'Deleting pod \"{pod_name}\" phase \"{pod_phase}\" and reason \"{pod_reason}\", '\n f'restart policy \"{pod_restart_policy}\"'\n )\n try:\n _delete_pod(pod.metadata.name, namespace)\n except ApiException as e:\n print(f\"Can't remove POD: {e}\", file=sys.stderr)\n continue\n print(f\"No action taken on pod {pod_name}\")\n continue_token = pod_list.metadata._continue\n if not continue_token:\n break\n list_kwargs[\"_continue\"] = continue_token\n\n\ndef _delete_pod(name, namespace):\n \"\"\"Helper Function for cleanup_pods.\"\"\"\n core_v1 = client.CoreV1Api()\n delete_options = client.V1DeleteOptions()\n print(f'Deleting POD \"{name}\" from \"{namespace}\" namespace')\n api_response = core_v1.delete_namespaced_pod(name=name, namespace=namespace, body=delete_options)\n print(api_response)\n", "path": "airflow/cli/commands/kubernetes_command.py"}]} | 2,888 | 180 |
gh_patches_debug_13922 | rasdani/github-patches | git_diff | huggingface__accelerate-445 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`psutil` required by utils/modeling.py but it isn't declared as a dependency
### System Info
```Shell
Accelerate `0.10.0.dev0` on Debian Bullseye running Python 3.10.5.
File "/opt/venv/lib/python3.10/site-packages/accelerate/utils/modeling.py", line 276, in get_max_memory
import psutil
ModuleNotFoundError: No module named 'psutil'
```
I'm not sure if you have any minimum version you need to satisfy for `psutil` as a runtime dependency but I see that there are no constraints on it as a `test` dependency in setup.py.
If you don't have any requirements, I'm happy to just add it myself and open a patch PR.
Thanks!
```
### Information
- [ ] The official example scripts
- [ ] My own modified scripts
### Tasks
- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)
- [ ] My own task or dataset (give details below)
### Reproduction
Use `device_map="auto"` when loading any model that supports it.
### Expected behavior
```Shell
I expect that `psutil` is declared as a runtime dependency of the `accelerate` package instead of having to install it myself.
```
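
A quick way to confirm what the installed distribution actually declares (a minimal sketch, assuming Python >= 3.8 so that `importlib.metadata` is available):

```python
from importlib.metadata import requires

# Print the requirements declared by the installed accelerate distribution;
# in the affected versions, "psutil" should only appear behind the test/dev extras.
for requirement in requires("accelerate") or []:
    print(requirement)
```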
</issue>
<code>
[start of setup.py]
1 # Copyright 2021 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import setup
16 from setuptools import find_packages
17
18 extras = {}
19 extras["quality"] = ["black ~= 22.0", "isort >= 5.5.4", "flake8 >= 3.8.3"]
20 extras["docs"] = []
21 extras["test"] = [
22 "psutil",
23 "pytest",
24 "pytest-xdist",
25 "pytest-subtests",
26 "datasets",
27 "evaluate",
28 "transformers",
29 "scipy",
30 "sklearn",
31 "parameterized",
32 "deepspeed",
33 ]
34
35 extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard"]
36 extras["dev"] = extras["quality"] + extras["test"]
37
38 extras["sagemaker"] = [
39 "sagemaker", # boto3 is a required package in sagemaker
40 ]
41
42 setup(
43 name="accelerate",
44 version="0.10.0.dev0",
45 description="Accelerate",
46 long_description=open("README.md", "r", encoding="utf-8").read(),
47 long_description_content_type="text/markdown",
48 keywords="deep learning",
49 license="Apache",
50 author="The HuggingFace team",
51 author_email="[email protected]",
52 url="https://github.com/huggingface/accelerate",
53 package_dir={"": "src"},
54 packages=find_packages("src"),
55 entry_points={
56 "console_scripts": [
57 "accelerate=accelerate.commands.accelerate_cli:main",
58 "accelerate-config=accelerate.commands.config:main",
59 "accelerate-launch=accelerate.commands.launch:main",
60 ]
61 },
62 python_requires=">=3.7.0",
63 install_requires=["numpy>=1.17", "packaging>=20.0", "pyyaml", "torch>=1.4.0"],
64 extras_require=extras,
65 classifiers=[
66 "Development Status :: 5 - Production/Stable",
67 "Intended Audience :: Developers",
68 "Intended Audience :: Education",
69 "Intended Audience :: Science/Research",
70 "License :: OSI Approved :: Apache Software License",
71 "Operating System :: OS Independent",
72 "Programming Language :: Python :: 3",
73 "Programming Language :: Python :: 3.7",
74 "Topic :: Scientific/Engineering :: Artificial Intelligence",
75 ],
76 )
77
78 # Release checklist
79 # 1. Change the version in __init__.py and setup.py.
80 # 2. Commit these changes with the message: "Release: VERSION"
81 # 3. Add a tag in git to mark the release: "git tag VERSION -m 'Adds tag VERSION for pypi' "
82 # Push the tag to git: git push --tags origin main
83 # 4. Run the following commands in the top-level directory:
84 # python setup.py bdist_wheel
85 # python setup.py sdist
86 # 5. Upload the package to the pypi test server first:
87 # twine upload dist/* -r pypitest
88 # twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/
89 # 6. Check that you can install it in a virtualenv by running:
90 # pip install -i https://testpypi.python.org/pypi accelerate
91 # accelerate env
92 # accelerate test
93 # 7. Upload the final version to actual pypi:
94 # twine upload dist/* -r pypi
95 # 8. Add release notes to the tag in github once everything is looking hunky-dory.
96 # 9. Update the version in __init__.py, setup.py to the new version "-dev" and push to master
97
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,6 @@
extras["quality"] = ["black ~= 22.0", "isort >= 5.5.4", "flake8 >= 3.8.3"]
extras["docs"] = []
extras["test"] = [
- "psutil",
"pytest",
"pytest-xdist",
"pytest-subtests",
@@ -60,7 +59,7 @@
]
},
python_requires=">=3.7.0",
- install_requires=["numpy>=1.17", "packaging>=20.0", "pyyaml", "torch>=1.4.0"],
+ install_requires=["numpy>=1.17", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.4.0"],
extras_require=extras,
classifiers=[
"Development Status :: 5 - Production/Stable",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,6 @@\n extras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\"]\n extras[\"docs\"] = []\n extras[\"test\"] = [\n- \"psutil\",\n \"pytest\",\n \"pytest-xdist\",\n \"pytest-subtests\",\n@@ -60,7 +59,7 @@\n ]\n },\n python_requires=\">=3.7.0\",\n- install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"pyyaml\", \"torch>=1.4.0\"],\n+ install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"psutil\", \"pyyaml\", \"torch>=1.4.0\"],\n extras_require=extras,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n", "issue": "`psutil` required by utils/modeling.py but it isn't declared as a dependency\n### System Info\n\n```Shell\nAccelerate `0.10.0.dev0` on Debian Bullseye running Python 3.10.5.\r\n\r\n\r\nFile \"/opt/venv/lib/python3.10/site-packages/accelerate/utils/modeling.py\", line 276, in get_max_memory\r\n import psutil\r\nModuleNotFoundError: No module named 'psutil'\r\n```\r\n\r\nI'm not sure if you have any minimum version you need to satisfy for `psutil` as a runtime dependency but I see that there are no constraints on it as a `test` dependency in setup.py.\r\n\r\nIf you don't have any requirements, I'm happy to just add it myself and open a patch PR.\r\n\r\nThanks!\n```\n\n\n### Information\n\n- [ ] The official example scripts\n- [ ] My own modified scripts\n\n### Tasks\n\n- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)\n- [ ] My own task or dataset (give details below)\n\n### Reproduction\n\nUse `device_map=\"auto\"` when loading any model that supports it.\n\n### Expected behavior\n\n```Shell\nI expect that `psutil` is declared as a runtime dependency of the `accelerate` package instead of having to install it myself.\n```\n\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nextras = {}\nextras[\"quality\"] = [\"black ~= 22.0\", \"isort >= 5.5.4\", \"flake8 >= 3.8.3\"]\nextras[\"docs\"] = []\nextras[\"test\"] = [\n \"psutil\",\n \"pytest\",\n \"pytest-xdist\",\n \"pytest-subtests\",\n \"datasets\",\n \"evaluate\",\n \"transformers\",\n \"scipy\",\n \"sklearn\",\n \"parameterized\",\n \"deepspeed\",\n]\n\nextras[\"test_trackers\"] = [\"wandb\", \"comet-ml\", \"tensorboard\"]\nextras[\"dev\"] = extras[\"quality\"] + extras[\"test\"]\n\nextras[\"sagemaker\"] = [\n \"sagemaker\", # boto3 is a required package in sagemaker\n]\n\nsetup(\n name=\"accelerate\",\n version=\"0.10.0.dev0\",\n description=\"Accelerate\",\n long_description=open(\"README.md\", \"r\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n keywords=\"deep learning\",\n license=\"Apache\",\n author=\"The HuggingFace team\",\n author_email=\"[email protected]\",\n url=\"https://github.com/huggingface/accelerate\",\n package_dir={\"\": \"src\"},\n packages=find_packages(\"src\"),\n entry_points={\n \"console_scripts\": [\n \"accelerate=accelerate.commands.accelerate_cli:main\",\n \"accelerate-config=accelerate.commands.config:main\",\n \"accelerate-launch=accelerate.commands.launch:main\",\n ]\n },\n python_requires=\">=3.7.0\",\n install_requires=[\"numpy>=1.17\", \"packaging>=20.0\", \"pyyaml\", \"torch>=1.4.0\"],\n extras_require=extras,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n\n# Release checklist\n# 1. Change the version in __init__.py and setup.py.\n# 2. Commit these changes with the message: \"Release: VERSION\"\n# 3. Add a tag in git to mark the release: \"git tag VERSION -m 'Adds tag VERSION for pypi' \"\n# Push the tag to git: git push --tags origin main\n# 4. Run the following commands in the top-level directory:\n# python setup.py bdist_wheel\n# python setup.py sdist\n# 5. Upload the package to the pypi test server first:\n# twine upload dist/* -r pypitest\n# twine upload dist/* -r pypitest --repository-url=https://test.pypi.org/legacy/\n# 6. Check that you can install it in a virtualenv by running:\n# pip install -i https://testpypi.python.org/pypi accelerate\n# accelerate env\n# accelerate test\n# 7. Upload the final version to actual pypi:\n# twine upload dist/* -r pypi\n# 8. Add release notes to the tag in github once everything is looking hunky-dory.\n# 9. Update the version in __init__.py, setup.py to the new version \"-dev\" and push to master\n", "path": "setup.py"}]} | 1,949 | 222 |
gh_patches_debug_9144 | rasdani/github-patches | git_diff | Cloud-CV__EvalAI-1305 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add challenge filtering using featured parameter.
We need to add filtering in the challenge model on the basis of the `featured` parameter.
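
A minimal sketch of the query this enables, assuming `Challenge` exposes a boolean `featured` field alongside `published` and that the app is importable as `challenges` (as in `apps/challenges`):

```python
from challenges.models import Challenge

# Fetch only the featured, published challenges, newest first.
featured_challenges = (
    Challenge.objects.filter(featured=True, published=True).order_by("-start_date")
)
```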
</issue>
<code>
[start of apps/challenges/admin.py]
1 from django.contrib import admin
2
3 from base.admin import ImportExportTimeStampedAdmin
4
5 from .models import (Challenge,
6 ChallengeConfiguration,
7 ChallengePhase,
8 ChallengePhaseSplit,
9 DatasetSplit,
10 Leaderboard,
11 LeaderboardData,
12 StarChallenge,)
13
14
15 @admin.register(Challenge)
16 class ChallengeAdmin(ImportExportTimeStampedAdmin):
17 list_display = ("title", "start_date", "end_date", "creator", "published", "enable_forum", "anonymous_leaderboard")
18 list_filter = ("creator", "published", "enable_forum", "anonymous_leaderboard")
19 search_fields = ("title", "creator")
20
21
22 @admin.register(DatasetSplit)
23 class DatasetSplitAdmin(ImportExportTimeStampedAdmin):
24 list_display = ("name", "codename")
25 list_filter = ("name", "codename")
26 search_fields = ("name", "codename")
27
28
29 @admin.register(ChallengePhase)
30 class ChallengePhaseAdmin(ImportExportTimeStampedAdmin):
31 list_display = ("name", "challenge", "start_date", "end_date", "test_annotation", "is_public", "leaderboard_public")
32 list_filter = ("leaderboard_public", "challenge")
33 search_fields = ("name",)
34
35
36 @admin.register(Leaderboard)
37 class LeaderboardAdmin(ImportExportTimeStampedAdmin):
38 list_display = ("id", "schema")
39 search_fields = ("id",)
40
41
42 @admin.register(ChallengePhaseSplit)
43 class ChallengePhaseSplitAdmin(ImportExportTimeStampedAdmin):
44 list_display = ("id", "challenge_phase", "dataset_split", "leaderboard", "visibility")
45 list_filter = ("challenge_phase", "dataset_split", "leaderboard", "visibility")
46 search_fields = ("challenge_phase", "dataset_split", "leaderboard")
47
48
49 @admin.register(LeaderboardData)
50 class LeaderboardDataAdmin(ImportExportTimeStampedAdmin):
51 list_display = ("challenge_phase_split", "submission", "leaderboard", "result")
52 list_filter = ("challenge_phase_split", "leaderboard",)
53 search_fields = ("challenge_phase_split", "submission", "leaderboard", "result")
54
55
56 @admin.register(ChallengeConfiguration)
57 class ChallengeConfigurationAdmin(ImportExportTimeStampedAdmin):
58 list_display = ('user', 'challenge', 'is_created', 'zip_configuration',)
59 list_filter = ('user', 'is_created',)
60 search_fields = ('user', 'challenge',)
61
62
63 @admin.register(StarChallenge)
64 class StarChallengeAdmin(ImportExportTimeStampedAdmin):
65 list_display = ('user', 'challenge', 'is_starred')
66 search_fields = ('user', 'challenge',)
67
[end of apps/challenges/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/challenges/admin.py b/apps/challenges/admin.py
--- a/apps/challenges/admin.py
+++ b/apps/challenges/admin.py
@@ -14,8 +14,9 @@
@admin.register(Challenge)
class ChallengeAdmin(ImportExportTimeStampedAdmin):
- list_display = ("title", "start_date", "end_date", "creator", "published", "enable_forum", "anonymous_leaderboard")
- list_filter = ("creator", "published", "enable_forum", "anonymous_leaderboard")
+ list_display = ("title", "start_date", "end_date", "creator", "published", "enable_forum", "anonymous_leaderboard",
+ "featured")
+ list_filter = ("creator", "published", "enable_forum", "anonymous_leaderboard", "featured")
search_fields = ("title", "creator")
| {"golden_diff": "diff --git a/apps/challenges/admin.py b/apps/challenges/admin.py\n--- a/apps/challenges/admin.py\n+++ b/apps/challenges/admin.py\n@@ -14,8 +14,9 @@\n \n @admin.register(Challenge)\n class ChallengeAdmin(ImportExportTimeStampedAdmin):\n- list_display = (\"title\", \"start_date\", \"end_date\", \"creator\", \"published\", \"enable_forum\", \"anonymous_leaderboard\")\n- list_filter = (\"creator\", \"published\", \"enable_forum\", \"anonymous_leaderboard\")\n+ list_display = (\"title\", \"start_date\", \"end_date\", \"creator\", \"published\", \"enable_forum\", \"anonymous_leaderboard\",\n+ \"featured\")\n+ list_filter = (\"creator\", \"published\", \"enable_forum\", \"anonymous_leaderboard\", \"featured\")\n search_fields = (\"title\", \"creator\")\n", "issue": "Add challenge filtering using featured parameter.\nWe need to add filtering in challenge model on the basis of `featured` parameter.\nAdd challenge filtering using featured parameter.\nWe need to add filtering in challenge model on the basis of `featured` parameter.\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom base.admin import ImportExportTimeStampedAdmin\n\nfrom .models import (Challenge,\n ChallengeConfiguration,\n ChallengePhase,\n ChallengePhaseSplit,\n DatasetSplit,\n Leaderboard,\n LeaderboardData,\n StarChallenge,)\n\n\[email protected](Challenge)\nclass ChallengeAdmin(ImportExportTimeStampedAdmin):\n list_display = (\"title\", \"start_date\", \"end_date\", \"creator\", \"published\", \"enable_forum\", \"anonymous_leaderboard\")\n list_filter = (\"creator\", \"published\", \"enable_forum\", \"anonymous_leaderboard\")\n search_fields = (\"title\", \"creator\")\n\n\[email protected](DatasetSplit)\nclass DatasetSplitAdmin(ImportExportTimeStampedAdmin):\n list_display = (\"name\", \"codename\")\n list_filter = (\"name\", \"codename\")\n search_fields = (\"name\", \"codename\")\n\n\[email protected](ChallengePhase)\nclass ChallengePhaseAdmin(ImportExportTimeStampedAdmin):\n list_display = (\"name\", \"challenge\", \"start_date\", \"end_date\", \"test_annotation\", \"is_public\", \"leaderboard_public\")\n list_filter = (\"leaderboard_public\", \"challenge\")\n search_fields = (\"name\",)\n\n\[email protected](Leaderboard)\nclass LeaderboardAdmin(ImportExportTimeStampedAdmin):\n list_display = (\"id\", \"schema\")\n search_fields = (\"id\",)\n\n\[email protected](ChallengePhaseSplit)\nclass ChallengePhaseSplitAdmin(ImportExportTimeStampedAdmin):\n list_display = (\"id\", \"challenge_phase\", \"dataset_split\", \"leaderboard\", \"visibility\")\n list_filter = (\"challenge_phase\", \"dataset_split\", \"leaderboard\", \"visibility\")\n search_fields = (\"challenge_phase\", \"dataset_split\", \"leaderboard\")\n\n\[email protected](LeaderboardData)\nclass LeaderboardDataAdmin(ImportExportTimeStampedAdmin):\n list_display = (\"challenge_phase_split\", \"submission\", \"leaderboard\", \"result\")\n list_filter = (\"challenge_phase_split\", \"leaderboard\",)\n search_fields = (\"challenge_phase_split\", \"submission\", \"leaderboard\", \"result\")\n\n\[email protected](ChallengeConfiguration)\nclass ChallengeConfigurationAdmin(ImportExportTimeStampedAdmin):\n list_display = ('user', 'challenge', 'is_created', 'zip_configuration',)\n list_filter = ('user', 'is_created',)\n search_fields = ('user', 'challenge',)\n\n\[email protected](StarChallenge)\nclass StarChallengeAdmin(ImportExportTimeStampedAdmin):\n list_display = ('user', 'challenge', 'is_starred')\n search_fields = ('user', 
'challenge',)\n", "path": "apps/challenges/admin.py"}]} | 1,259 | 181 |
gh_patches_debug_49480 | rasdani/github-patches | git_diff | numpy__numpy-15425 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
dir(numpy) returns duplicate "testing"
<!-- Please describe the issue in detail here, and fill in the fields below -->
### Reproducing code example:
<!-- A short code example that reproduces the problem/missing feature. It should be
self-contained, i.e., possible to run as-is via 'python myproblem.py' -->
```python
import numpy as np
>>> np.__version__
'1.18.1'
>>> len(dir(np))
620
>>> np.testing
<module 'numpy.testing' from 'C:\\Python\\Python38\\lib\\site-packages\\numpy\\testing\\__init__.py'>
>>> len(dir(np))
621
>>> [i for i in dir(np) if i == "testing"]
['testing', 'testing']
```
### Error:
"testing" appears twice in dir(np)
### Numpy/Python version information:
<!-- Output from 'import sys, numpy; print(numpy.__version__, sys.version)' -->
Python 3.8.0 (tags/v3.8.0:fa919fd, Oct 14 2019, 19:37:50) [MSC v.1916 64 bit (AMD64)] on win32
>>> np.__version__
'1.18.1'
</issue>
<code>
[start of numpy/__init__.py]
1 """
2 NumPy
3 =====
4
5 Provides
6 1. An array object of arbitrary homogeneous items
7 2. Fast mathematical operations over arrays
8 3. Linear Algebra, Fourier Transforms, Random Number Generation
9
10 How to use the documentation
11 ----------------------------
12 Documentation is available in two forms: docstrings provided
13 with the code, and a loose standing reference guide, available from
14 `the NumPy homepage <https://www.scipy.org>`_.
15
16 We recommend exploring the docstrings using
17 `IPython <https://ipython.org>`_, an advanced Python shell with
18 TAB-completion and introspection capabilities. See below for further
19 instructions.
20
21 The docstring examples assume that `numpy` has been imported as `np`::
22
23 >>> import numpy as np
24
25 Code snippets are indicated by three greater-than signs::
26
27 >>> x = 42
28 >>> x = x + 1
29
30 Use the built-in ``help`` function to view a function's docstring::
31
32 >>> help(np.sort)
33 ... # doctest: +SKIP
34
35 For some objects, ``np.info(obj)`` may provide additional help. This is
36 particularly true if you see the line "Help on ufunc object:" at the top
37 of the help() page. Ufuncs are implemented in C, not Python, for speed.
38 The native Python help() does not know how to view their help, but our
39 np.info() function does.
40
41 To search for documents containing a keyword, do::
42
43 >>> np.lookfor('keyword')
44 ... # doctest: +SKIP
45
46 General-purpose documents like a glossary and help on the basic concepts
47 of numpy are available under the ``doc`` sub-module::
48
49 >>> from numpy import doc
50 >>> help(doc)
51 ... # doctest: +SKIP
52
53 Available subpackages
54 ---------------------
55 doc
56 Topical documentation on broadcasting, indexing, etc.
57 lib
58 Basic functions used by several sub-packages.
59 random
60 Core Random Tools
61 linalg
62 Core Linear Algebra Tools
63 fft
64 Core FFT routines
65 polynomial
66 Polynomial tools
67 testing
68 NumPy testing tools
69 f2py
70 Fortran to Python Interface Generator.
71 distutils
72 Enhancements to distutils with support for
73 Fortran compilers support and more.
74
75 Utilities
76 ---------
77 test
78 Run numpy unittests
79 show_config
80 Show numpy build configuration
81 dual
82 Overwrite certain functions with high-performance Scipy tools
83 matlib
84 Make everything matrices.
85 __version__
86 NumPy version string
87
88 Viewing documentation using IPython
89 -----------------------------------
90 Start IPython with the NumPy profile (``ipython -p numpy``), which will
91 import `numpy` under the alias `np`. Then, use the ``cpaste`` command to
92 paste examples into the shell. To see which functions are available in
93 `numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
94 ``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
95 down the list. To view the docstring for a function, use
96 ``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
97 the source code).
98
99 Copies vs. in-place operation
100 -----------------------------
101 Most of the functions in `numpy` return a copy of the array argument
102 (e.g., `np.sort`). In-place versions of these functions are often
103 available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
104 Exceptions to this rule are documented.
105
106 """
107 import sys
108 import warnings
109
110 from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning
111 from ._globals import _NoValue
112
113 # We first need to detect if we're being called as part of the numpy setup
114 # procedure itself in a reliable manner.
115 try:
116 __NUMPY_SETUP__
117 except NameError:
118 __NUMPY_SETUP__ = False
119
120 if __NUMPY_SETUP__:
121 sys.stderr.write('Running from numpy source directory.\n')
122 else:
123 try:
124 from numpy.__config__ import show as show_config
125 except ImportError:
126 msg = """Error importing numpy: you should not try to import numpy from
127 its source directory; please exit the numpy source tree, and relaunch
128 your python interpreter from there."""
129 raise ImportError(msg)
130
131 from .version import git_revision as __git_revision__
132 from .version import version as __version__
133
134 __all__ = ['ModuleDeprecationWarning',
135 'VisibleDeprecationWarning']
136
137 # Allow distributors to run custom init code
138 from . import _distributor_init
139
140 from . import core
141 from .core import *
142 from . import compat
143 from . import lib
144 # FIXME: why have numpy.lib if everything is imported here??
145 from .lib import *
146
147 from . import linalg
148 from . import fft
149 from . import polynomial
150 from . import random
151 from . import ctypeslib
152 from . import ma
153 from . import matrixlib as _mat
154 from .matrixlib import *
155 from .compat import long
156
157 # Make these accessible from numpy name-space
158 # but not imported in from numpy import *
159 # TODO[gh-6103]: Deprecate these
160 if sys.version_info[0] >= 3:
161 from builtins import bool, int, float, complex, object, str
162 unicode = str
163 else:
164 from __builtin__ import bool, int, float, complex, object, unicode, str
165
166 from .core import round, abs, max, min
167 # now that numpy modules are imported, can initialize limits
168 core.getlimits._register_known_types()
169
170 __all__.extend(['__version__', 'show_config'])
171 __all__.extend(core.__all__)
172 __all__.extend(_mat.__all__)
173 __all__.extend(lib.__all__)
174 __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
175
176 # These are added by `from .core import *` and `core.__all__`, but we
177 # overwrite them above with builtins we do _not_ want to export.
178 __all__.remove('long')
179 __all__.remove('unicode')
180
181 # Remove things that are in the numpy.lib but not in the numpy namespace
182 # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
183 # that prevents adding more things to the main namespace by accident.
184 # The list below will grow until the `from .lib import *` fixme above is
185 # taken care of
186 __all__.remove('Arrayterator')
187 del Arrayterator
188
189 # Filter out Cython harmless warnings
190 warnings.filterwarnings("ignore", message="numpy.dtype size changed")
191 warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
192 warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
193
194 # oldnumeric and numarray were removed in 1.9. In case some packages import
195 # but do not use them, we define them here for backward compatibility.
196 oldnumeric = 'removed'
197 numarray = 'removed'
198
199 if sys.version_info[:2] >= (3, 7):
200 # Importing Tester requires importing all of UnitTest which is not a
201 # cheap import Since it is mainly used in test suits, we lazy import it
202 # here to save on the order of 10 ms of import time for most users
203 #
204 # The previous way Tester was imported also had a side effect of adding
205 # the full `numpy.testing` namespace
206 #
207 # module level getattr is only supported in 3.7 onwards
208 # https://www.python.org/dev/peps/pep-0562/
209 def __getattr__(attr):
210 if attr == 'testing':
211 import numpy.testing as testing
212 return testing
213 elif attr == 'Tester':
214 from .testing import Tester
215 return Tester
216 else:
217 raise AttributeError("module {!r} has no attribute "
218 "{!r}".format(__name__, attr))
219
220 def __dir__():
221 return list(globals().keys()) + ['Tester', 'testing']
222
223 else:
224 # We don't actually use this ourselves anymore, but I'm not 100% sure that
225 # no-one else in the world is using it (though I hope not)
226 from .testing import Tester
227
228 # Pytest testing
229 from numpy._pytesttester import PytestTester
230 test = PytestTester(__name__)
231 del PytestTester
232
233
234 def _sanity_check():
235 """
236 Quick sanity checks for common bugs caused by environment.
237 There are some cases e.g. with wrong BLAS ABI that cause wrong
238 results under specific runtime conditions that are not necessarily
239 achieved during test suite runs, and it is useful to catch those early.
240
241 See https://github.com/numpy/numpy/issues/8577 and other
242 similar bug reports.
243
244 """
245 try:
246 x = ones(2, dtype=float32)
247 if not abs(x.dot(x) - 2.0) < 1e-5:
248 raise AssertionError()
249 except AssertionError:
250 msg = ("The current Numpy installation ({!r}) fails to "
251 "pass simple sanity checks. This can be caused for example "
252 "by incorrect BLAS library being linked in, or by mixing "
253 "package managers (pip, conda, apt, ...). Search closed "
254 "numpy issues for similar problems.")
255 raise RuntimeError(msg.format(__file__))
256
257 _sanity_check()
258 del _sanity_check
259
[end of numpy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numpy/__init__.py b/numpy/__init__.py
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -218,7 +218,7 @@
"{!r}".format(__name__, attr))
def __dir__():
- return list(globals().keys()) + ['Tester', 'testing']
+ return list(globals().keys() | {'Tester', 'testing'})
else:
# We don't actually use this ourselves anymore, but I'm not 100% sure that
| {"golden_diff": "diff --git a/numpy/__init__.py b/numpy/__init__.py\n--- a/numpy/__init__.py\n+++ b/numpy/__init__.py\n@@ -218,7 +218,7 @@\n \"{!r}\".format(__name__, attr))\n \n def __dir__():\n- return list(globals().keys()) + ['Tester', 'testing']\n+ return list(globals().keys() | {'Tester', 'testing'})\n \n else:\n # We don't actually use this ourselves anymore, but I'm not 100% sure that\n", "issue": "dir(numpy) returns duplicate \"testing\"\n<!-- Please describe the issue in detail here, and fill in the fields below -->\r\n\r\n### Reproducing code example:\r\n\r\n<!-- A short code example that reproduces the problem/missing feature. It should be\r\nself-contained, i.e., possible to run as-is via 'python myproblem.py' -->\r\n\r\n```python\r\nimport numpy as np\r\n>>> np.__version__\r\n'1.18.1'\r\n>>> len(dir(np))\r\n620\r\n>>> np.testing\r\n<module 'numpy.testing' from 'C:\\\\Python\\\\Python38\\\\lib\\\\site-packages\\\\numpy\\\\testing\\\\__init__.py'>\r\n>>> len(dir(np))\r\n621\r\n>>> [i for i in dir(np) if i == \"testing\"]\r\n['testing', 'testing']\r\n```\r\n### Error:\r\n\"testing\" appears twice in dir(np)\r\n\r\n\r\n### Numpy/Python version information:\r\n\r\n<!-- Output from 'import sys, numpy; print(numpy.__version__, sys.version)' -->\r\nPython 3.8.0 (tags/v3.8.0:fa919fd, Oct 14 2019, 19:37:50) [MSC v.1916 64 bit (AMD64)] on win32\r\n>>> np.__version__\r\n'1.18.1'\n", "before_files": [{"content": "\"\"\"\nNumPy\n=====\n\nProvides\n 1. An array object of arbitrary homogeneous items\n 2. Fast mathematical operations over arrays\n 3. Linear Algebra, Fourier Transforms, Random Number Generation\n\nHow to use the documentation\n----------------------------\nDocumentation is available in two forms: docstrings provided\nwith the code, and a loose standing reference guide, available from\n`the NumPy homepage <https://www.scipy.org>`_.\n\nWe recommend exploring the docstrings using\n`IPython <https://ipython.org>`_, an advanced Python shell with\nTAB-completion and introspection capabilities. See below for further\ninstructions.\n\nThe docstring examples assume that `numpy` has been imported as `np`::\n\n >>> import numpy as np\n\nCode snippets are indicated by three greater-than signs::\n\n >>> x = 42\n >>> x = x + 1\n\nUse the built-in ``help`` function to view a function's docstring::\n\n >>> help(np.sort)\n ... # doctest: +SKIP\n\nFor some objects, ``np.info(obj)`` may provide additional help. This is\nparticularly true if you see the line \"Help on ufunc object:\" at the top\nof the help() page. Ufuncs are implemented in C, not Python, for speed.\nThe native Python help() does not know how to view their help, but our\nnp.info() function does.\n\nTo search for documents containing a keyword, do::\n\n >>> np.lookfor('keyword')\n ... # doctest: +SKIP\n\nGeneral-purpose documents like a glossary and help on the basic concepts\nof numpy are available under the ``doc`` sub-module::\n\n >>> from numpy import doc\n >>> help(doc)\n ... 
# doctest: +SKIP\n\nAvailable subpackages\n---------------------\ndoc\n Topical documentation on broadcasting, indexing, etc.\nlib\n Basic functions used by several sub-packages.\nrandom\n Core Random Tools\nlinalg\n Core Linear Algebra Tools\nfft\n Core FFT routines\npolynomial\n Polynomial tools\ntesting\n NumPy testing tools\nf2py\n Fortran to Python Interface Generator.\ndistutils\n Enhancements to distutils with support for\n Fortran compilers support and more.\n\nUtilities\n---------\ntest\n Run numpy unittests\nshow_config\n Show numpy build configuration\ndual\n Overwrite certain functions with high-performance Scipy tools\nmatlib\n Make everything matrices.\n__version__\n NumPy version string\n\nViewing documentation using IPython\n-----------------------------------\nStart IPython with the NumPy profile (``ipython -p numpy``), which will\nimport `numpy` under the alias `np`. Then, use the ``cpaste`` command to\npaste examples into the shell. To see which functions are available in\n`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use\n``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow\ndown the list. To view the docstring for a function, use\n``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view\nthe source code).\n\nCopies vs. in-place operation\n-----------------------------\nMost of the functions in `numpy` return a copy of the array argument\n(e.g., `np.sort`). In-place versions of these functions are often\navailable as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.\nExceptions to this rule are documented.\n\n\"\"\"\nimport sys\nimport warnings\n\nfrom ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning\nfrom ._globals import _NoValue\n\n# We first need to detect if we're being called as part of the numpy setup\n# procedure itself in a reliable manner.\ntry:\n __NUMPY_SETUP__\nexcept NameError:\n __NUMPY_SETUP__ = False\n\nif __NUMPY_SETUP__:\n sys.stderr.write('Running from numpy source directory.\\n')\nelse:\n try:\n from numpy.__config__ import show as show_config\n except ImportError:\n msg = \"\"\"Error importing numpy: you should not try to import numpy from\n its source directory; please exit the numpy source tree, and relaunch\n your python interpreter from there.\"\"\"\n raise ImportError(msg)\n\n from .version import git_revision as __git_revision__\n from .version import version as __version__\n\n __all__ = ['ModuleDeprecationWarning',\n 'VisibleDeprecationWarning']\n\n # Allow distributors to run custom init code\n from . import _distributor_init\n\n from . import core\n from .core import *\n from . import compat\n from . import lib\n # FIXME: why have numpy.lib if everything is imported here??\n from .lib import *\n\n from . import linalg\n from . import fft\n from . import polynomial\n from . import random\n from . import ctypeslib\n from . import ma\n from . 
import matrixlib as _mat\n from .matrixlib import *\n from .compat import long\n\n # Make these accessible from numpy name-space\n # but not imported in from numpy import *\n # TODO[gh-6103]: Deprecate these\n if sys.version_info[0] >= 3:\n from builtins import bool, int, float, complex, object, str\n unicode = str\n else:\n from __builtin__ import bool, int, float, complex, object, unicode, str\n\n from .core import round, abs, max, min\n # now that numpy modules are imported, can initialize limits\n core.getlimits._register_known_types()\n\n __all__.extend(['__version__', 'show_config'])\n __all__.extend(core.__all__)\n __all__.extend(_mat.__all__)\n __all__.extend(lib.__all__)\n __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])\n\n # These are added by `from .core import *` and `core.__all__`, but we\n # overwrite them above with builtins we do _not_ want to export.\n __all__.remove('long')\n __all__.remove('unicode')\n\n # Remove things that are in the numpy.lib but not in the numpy namespace\n # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)\n # that prevents adding more things to the main namespace by accident.\n # The list below will grow until the `from .lib import *` fixme above is\n # taken care of\n __all__.remove('Arrayterator')\n del Arrayterator\n\n # Filter out Cython harmless warnings\n warnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\n warnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\n warnings.filterwarnings(\"ignore\", message=\"numpy.ndarray size changed\")\n\n # oldnumeric and numarray were removed in 1.9. In case some packages import\n # but do not use them, we define them here for backward compatibility.\n oldnumeric = 'removed'\n numarray = 'removed'\n\n if sys.version_info[:2] >= (3, 7):\n # Importing Tester requires importing all of UnitTest which is not a\n # cheap import Since it is mainly used in test suits, we lazy import it\n # here to save on the order of 10 ms of import time for most users\n #\n # The previous way Tester was imported also had a side effect of adding\n # the full `numpy.testing` namespace\n #\n # module level getattr is only supported in 3.7 onwards\n # https://www.python.org/dev/peps/pep-0562/\n def __getattr__(attr):\n if attr == 'testing':\n import numpy.testing as testing\n return testing\n elif attr == 'Tester':\n from .testing import Tester\n return Tester\n else:\n raise AttributeError(\"module {!r} has no attribute \"\n \"{!r}\".format(__name__, attr))\n\n def __dir__():\n return list(globals().keys()) + ['Tester', 'testing']\n\n else:\n # We don't actually use this ourselves anymore, but I'm not 100% sure that\n # no-one else in the world is using it (though I hope not)\n from .testing import Tester\n\n # Pytest testing\n from numpy._pytesttester import PytestTester\n test = PytestTester(__name__)\n del PytestTester\n\n\n def _sanity_check():\n \"\"\"\n Quick sanity checks for common bugs caused by environment.\n There are some cases e.g. with wrong BLAS ABI that cause wrong\n results under specific runtime conditions that are not necessarily\n achieved during test suite runs, and it is useful to catch those early.\n\n See https://github.com/numpy/numpy/issues/8577 and other\n similar bug reports.\n\n \"\"\"\n try:\n x = ones(2, dtype=float32)\n if not abs(x.dot(x) - 2.0) < 1e-5:\n raise AssertionError()\n except AssertionError:\n msg = (\"The current Numpy installation ({!r}) fails to \"\n \"pass simple sanity checks. 
This can be caused for example \"\n \"by incorrect BLAS library being linked in, or by mixing \"\n \"package managers (pip, conda, apt, ...). Search closed \"\n \"numpy issues for similar problems.\")\n raise RuntimeError(msg.format(__file__))\n\n _sanity_check()\n del _sanity_check\n", "path": "numpy/__init__.py"}]} | 3,591 | 127 |
gh_patches_debug_5820 | rasdani/github-patches | git_diff | aws__aws-cli-761 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
datapipeline query-object --query argument is shadowed
The top level `--query` option shadows the `--query` from datapipeline query-object. This can be addressed in the argrename customization model. We can also take this opportunity to remove the `cli_name` from the `.extra.json` files in botocore.
</issue>
<code>
[start of awscli/customizations/argrename.py]
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """
14 """
15
16 from awscli.customizations import utils
17
18
19 ARGUMENT_RENAMES = {
20 # Mapping of original arg to renamed arg.
21 # The key is <service>.<operation>.argname
22 # The first part of the key is used for event registration
23 # so if you wanted to rename something for an entire service you
24 # could say 'ec2.*.dry-run': 'renamed-arg-name', or if you wanted
25 # to rename across all services you could say '*.*.dry-run': 'new-name'.
26 'ec2.create-image.no-no-reboot': 'reboot',
27 'ec2.*.no-egress': 'ingress',
28 'ec2.*.no-disable-api-termination': 'enable-api-termination',
29 }
30
31
32 def register_arg_renames(cli):
33 for original, new_name in ARGUMENT_RENAMES.items():
34 event_portion, original_arg_name = original.rsplit('.', 1)
35 cli.register('building-argument-table.%s' % event_portion,
36 rename_arg(original_arg_name, new_name))
37
38
39 def rename_arg(original_arg_name, new_name):
40 def _rename_arg(argument_table, **kwargs):
41 if original_arg_name in argument_table:
42 utils.rename_argument(argument_table, original_arg_name, new_name)
43 return _rename_arg
44
[end of awscli/customizations/argrename.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/argrename.py b/awscli/customizations/argrename.py
--- a/awscli/customizations/argrename.py
+++ b/awscli/customizations/argrename.py
@@ -26,6 +26,11 @@
'ec2.create-image.no-no-reboot': 'reboot',
'ec2.*.no-egress': 'ingress',
'ec2.*.no-disable-api-termination': 'enable-api-termination',
+ 'opsworks.*.region': 'stack-region',
+ 'elastictranscoder.*.output': 'job-output',
+ 'swf.register-activity-type.version': 'activity-version',
+ 'swf.register-workflow-type.version': 'workflow-version',
+ 'datapipeline.*.query': 'objects-query',
}
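
To see the effect of the new `datapipeline.*.query` entry concretely, a minimal standalone sketch of the rename step might look like this; the `argument_table` dict is a hypothetical stand-in, and the plain dict move only approximates what `utils.rename_argument` does inside awscli:

```python
# Minimal sketch of how a rename entry rewrites an argument table.
# The argument_table dict below is a hypothetical stand-in; awscli itself
# calls utils.rename_argument() rather than the plain dict move shown here.
ARGUMENT_RENAMES = {
    'datapipeline.*.query': 'objects-query',  # entry added by the patch above
}


def rename_arg(original_arg_name, new_name):
    def _rename_arg(argument_table, **kwargs):
        if original_arg_name in argument_table:
            argument_table[new_name] = argument_table.pop(original_arg_name)
    return _rename_arg


argument_table = {'query': 'datapipeline query-object argument'}  # hypothetical
for original, new_name in ARGUMENT_RENAMES.items():
    _event_portion, original_arg_name = original.rsplit('.', 1)
    rename_arg(original_arg_name, new_name)(argument_table)

print(argument_table)
# {'objects-query': 'datapipeline query-object argument'}
# The subcommand option no longer collides with the global --query.
```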
| {"golden_diff": "diff --git a/awscli/customizations/argrename.py b/awscli/customizations/argrename.py\n--- a/awscli/customizations/argrename.py\n+++ b/awscli/customizations/argrename.py\n@@ -26,6 +26,11 @@\n 'ec2.create-image.no-no-reboot': 'reboot',\n 'ec2.*.no-egress': 'ingress',\n 'ec2.*.no-disable-api-termination': 'enable-api-termination',\n+ 'opsworks.*.region': 'stack-region',\n+ 'elastictranscoder.*.output': 'job-output',\n+ 'swf.register-activity-type.version': 'activity-version',\n+ 'swf.register-workflow-type.version': 'workflow-version',\n+ 'datapipeline.*.query': 'objects-query',\n }\n", "issue": "datapipeline query-object --query argument is shadowed\nThe top level `--query` option shadows the `--query` from datapipeline query-object. This can be addressed in the argrename customization model. We can also take this opportunity to remove the `cli_name` from the `.extra.json` files in botocore.\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\n\"\"\"\n\nfrom awscli.customizations import utils\n\n\nARGUMENT_RENAMES = {\n # Mapping of original arg to renamed arg.\n # The key is <service>.<operation>.argname\n # The first part of the key is used for event registration\n # so if you wanted to rename something for an entire service you\n # could say 'ec2.*.dry-run': 'renamed-arg-name', or if you wanted\n # to rename across all services you could say '*.*.dry-run': 'new-name'.\n 'ec2.create-image.no-no-reboot': 'reboot',\n 'ec2.*.no-egress': 'ingress',\n 'ec2.*.no-disable-api-termination': 'enable-api-termination',\n}\n\n\ndef register_arg_renames(cli):\n for original, new_name in ARGUMENT_RENAMES.items():\n event_portion, original_arg_name = original.rsplit('.', 1)\n cli.register('building-argument-table.%s' % event_portion,\n rename_arg(original_arg_name, new_name))\n\n\ndef rename_arg(original_arg_name, new_name):\n def _rename_arg(argument_table, **kwargs):\n if original_arg_name in argument_table:\n utils.rename_argument(argument_table, original_arg_name, new_name)\n return _rename_arg\n", "path": "awscli/customizations/argrename.py"}]} | 1,108 | 176 |
gh_patches_debug_8313 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-933 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add "epoch" options to basic templates
## 🚀 Feature
<!-- A clear and concise description of the feature proposal -->
Add "epochs" option to parser of 'basic_examples/lightning_module_template.py'
### Motivation
<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->
Thanks to 'basic_examples/lightning_module_template.py', I could build my deep learning model. Some beginners like me might build their model from this basic template. However, there is no option to control the number of training epochs. Options that people use often should be included in the basic template, so I am opening this issue.
### Pitch
<!-- A clear and concise description of what you want to happen. -->
I suggest that the basic template includes "epoch" option in the basic template.
### Alternatives
<!-- A clear and concise description of any alternative solutions or features you've considered, if any. -->
Add "epoch" options to parser of 'basic_examples/lightning_module_template.py'
```python
parser.add_argument('--epochs', default=10, type=int, metavar='N',
                    help='number of total epochs to run')

trainer = pl.Trainer(max_epochs=hparams.epochs)
```
### Additional context
<!-- Add any other context or screenshots about the feature request here. -->
I am really enjoying PytorchLightning framework. Thanks 😄
</issue>
<code>
[start of pl_examples/basic_examples/lightning_module_template.py]
1 """
2 Example template for defining a system
3 """
4 import logging as log
5 import os
6 from argparse import ArgumentParser
7 from collections import OrderedDict
8
9 import torch
10 import torch.nn as nn
11 import torch.nn.functional as F
12 import torchvision.transforms as transforms
13 from torch import optim
14 from torch.utils.data import DataLoader
15 from torch.utils.data.distributed import DistributedSampler
16 from torchvision.datasets import MNIST
17
18 import pytorch_lightning as pl
19
20
21 class LightningTemplateModel(pl.LightningModule):
22 """
23 Sample model to show how to define a template
24 """
25
26 def __init__(self, hparams):
27 """
28 Pass in parsed HyperOptArgumentParser to the model
29 :param hparams:
30 """
31 # init superclass
32 super(LightningTemplateModel, self).__init__()
33 self.hparams = hparams
34
35 self.batch_size = hparams.batch_size
36
37 # if you specify an example input, the summary will show input/output for each layer
38 self.example_input_array = torch.rand(5, 28 * 28)
39
40 # build model
41 self.__build_model()
42
43 # ---------------------
44 # MODEL SETUP
45 # ---------------------
46 def __build_model(self):
47 """
48 Layout model
49 :return:
50 """
51 self.c_d1 = nn.Linear(in_features=self.hparams.in_features,
52 out_features=self.hparams.hidden_dim)
53 self.c_d1_bn = nn.BatchNorm1d(self.hparams.hidden_dim)
54 self.c_d1_drop = nn.Dropout(self.hparams.drop_prob)
55
56 self.c_d2 = nn.Linear(in_features=self.hparams.hidden_dim,
57 out_features=self.hparams.out_features)
58
59 # ---------------------
60 # TRAINING
61 # ---------------------
62 def forward(self, x):
63 """
64 No special modification required for lightning, define as you normally would
65 :param x:
66 :return:
67 """
68
69 x = self.c_d1(x)
70 x = torch.tanh(x)
71 x = self.c_d1_bn(x)
72 x = self.c_d1_drop(x)
73
74 x = self.c_d2(x)
75 logits = F.log_softmax(x, dim=1)
76
77 return logits
78
79 def loss(self, labels, logits):
80 nll = F.nll_loss(logits, labels)
81 return nll
82
83 def training_step(self, batch, batch_idx):
84 """
85 Lightning calls this inside the training loop
86 :param batch:
87 :return:
88 """
89 # forward pass
90 x, y = batch
91 x = x.view(x.size(0), -1)
92
93 y_hat = self.forward(x)
94
95 # calculate loss
96 loss_val = self.loss(y, y_hat)
97
98 # in DP mode (default) make sure if result is scalar, there's another dim in the beginning
99 if self.trainer.use_dp or self.trainer.use_ddp2:
100 loss_val = loss_val.unsqueeze(0)
101
102 tqdm_dict = {'train_loss': loss_val}
103 output = OrderedDict({
104 'loss': loss_val,
105 'progress_bar': tqdm_dict,
106 'log': tqdm_dict
107 })
108
109 # can also return just a scalar instead of a dict (return loss_val)
110 return output
111
112 def validation_step(self, batch, batch_idx):
113 """
114 Lightning calls this inside the validation loop
115 :param batch:
116 :return:
117 """
118 x, y = batch
119 x = x.view(x.size(0), -1)
120 y_hat = self.forward(x)
121
122 loss_val = self.loss(y, y_hat)
123
124 # acc
125 labels_hat = torch.argmax(y_hat, dim=1)
126 val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
127 val_acc = torch.tensor(val_acc)
128
129 if self.on_gpu:
130 val_acc = val_acc.cuda(loss_val.device.index)
131
132 # in DP mode (default) make sure if result is scalar, there's another dim in the beginning
133 if self.trainer.use_dp or self.trainer.use_ddp2:
134 loss_val = loss_val.unsqueeze(0)
135 val_acc = val_acc.unsqueeze(0)
136
137 output = OrderedDict({
138 'val_loss': loss_val,
139 'val_acc': val_acc,
140 })
141
142 # can also return just a scalar instead of a dict (return loss_val)
143 return output
144
145 def validation_end(self, outputs):
146 """
147 Called at the end of validation to aggregate outputs
148 :param outputs: list of individual outputs of each validation step
149 :return:
150 """
151 # if returned a scalar from validation_step, outputs is a list of tensor scalars
152 # we return just the average in this case (if we want)
153 # return torch.stack(outputs).mean()
154
155 val_loss_mean = 0
156 val_acc_mean = 0
157 for output in outputs:
158 val_loss = output['val_loss']
159
160 # reduce manually when using dp
161 if self.trainer.use_dp or self.trainer.use_ddp2:
162 val_loss = torch.mean(val_loss)
163 val_loss_mean += val_loss
164
165 # reduce manually when using dp
166 val_acc = output['val_acc']
167 if self.trainer.use_dp or self.trainer.use_ddp2:
168 val_acc = torch.mean(val_acc)
169
170 val_acc_mean += val_acc
171
172 val_loss_mean /= len(outputs)
173 val_acc_mean /= len(outputs)
174 tqdm_dict = {'val_loss': val_loss_mean, 'val_acc': val_acc_mean}
175 result = {'progress_bar': tqdm_dict, 'log': tqdm_dict, 'val_loss': val_loss_mean}
176 return result
177
178 # ---------------------
179 # TRAINING SETUP
180 # ---------------------
181 def configure_optimizers(self):
182 """
183 return whatever optimizers we want here
184 :return: list of optimizers
185 """
186 optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)
187 scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
188 return [optimizer], [scheduler]
189
190 def __dataloader(self, train):
191 # init data generators
192 transform = transforms.Compose([transforms.ToTensor(),
193 transforms.Normalize((0.5,), (1.0,))])
194 dataset = MNIST(root=self.hparams.data_root, train=train,
195 transform=transform, download=False)
196
197 # when using multi-node (ddp) we need to add the datasampler
198 batch_size = self.hparams.batch_size
199
200 loader = DataLoader(
201 dataset=dataset,
202 batch_size=batch_size,
203 num_workers=0
204 )
205
206 return loader
207
208 def prepare_data(self):
209 transform = transforms.Compose([transforms.ToTensor(),
210 transforms.Normalize((0.5,), (1.0,))])
211 dataset = MNIST(root=self.hparams.data_root, train=True,
212 transform=transform, download=True)
213 dataset = MNIST(root=self.hparams.data_root, train=False,
214 transform=transform, download=True)
215
216 def train_dataloader(self):
217 log.info('Training data loader called.')
218 return self.__dataloader(train=True)
219
220 def val_dataloader(self):
221 log.info('Validation data loader called.')
222 return self.__dataloader(train=False)
223
224 def test_dataloader(self):
225 log.info('Test data loader called.')
226 return self.__dataloader(train=False)
227
228 @staticmethod
229 def add_model_specific_args(parent_parser, root_dir): # pragma: no cover
230 """
231 Parameters you define here will be available to your model through self.hparams
232 :param parent_parser:
233 :param root_dir:
234 :return:
235 """
236 parser = ArgumentParser(parents=[parent_parser])
237
238 # param overwrites
239 # parser.set_defaults(gradient_clip_val=5.0)
240
241 # network params
242 parser.add_argument('--in_features', default=28 * 28, type=int)
243 parser.add_argument('--out_features', default=10, type=int)
244 # use 500 for CPU, 50000 for GPU to see speed difference
245 parser.add_argument('--hidden_dim', default=50000, type=int)
246 parser.add_argument('--drop_prob', default=0.2, type=float)
247 parser.add_argument('--learning_rate', default=0.001, type=float)
248
249 # data
250 parser.add_argument('--data_root', default=os.path.join(root_dir, 'mnist'), type=str)
251
252 # training params (opt)
253 parser.add_argument('--optimizer_name', default='adam', type=str)
254 parser.add_argument('--batch_size', default=64, type=int)
255 return parser
256
[end of pl_examples/basic_examples/lightning_module_template.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pl_examples/basic_examples/lightning_module_template.py b/pl_examples/basic_examples/lightning_module_template.py
--- a/pl_examples/basic_examples/lightning_module_template.py
+++ b/pl_examples/basic_examples/lightning_module_template.py
@@ -250,6 +250,7 @@
parser.add_argument('--data_root', default=os.path.join(root_dir, 'mnist'), type=str)
# training params (opt)
+ parser.add_argument('--epochs', default=20, type=int)
parser.add_argument('--optimizer_name', default='adam', type=str)
parser.add_argument('--batch_size', default=64, type=int)
return parser
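
Following the wiring suggested in the issue, the new `--epochs` argument would typically be consumed roughly as sketched below; this assumes `pytorch_lightning` and the `pl_examples` package are importable in the environment, and `root_dir='.'` is an arbitrary placeholder:

```python
# Rough sketch of consuming the new --epochs argument, following the wiring
# proposed in the issue. Assumes pytorch_lightning and the pl_examples
# package are installed; root_dir='.' is an arbitrary placeholder.
from argparse import ArgumentParser

import pytorch_lightning as pl
from pl_examples.basic_examples.lightning_module_template import (
    LightningTemplateModel,
)

parent_parser = ArgumentParser(add_help=False)
parser = LightningTemplateModel.add_model_specific_args(parent_parser, root_dir='.')
hparams = parser.parse_args()

model = LightningTemplateModel(hparams)
trainer = pl.Trainer(max_epochs=hparams.epochs)  # uses the argument added above
trainer.fit(model)
```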
| {"golden_diff": "diff --git a/pl_examples/basic_examples/lightning_module_template.py b/pl_examples/basic_examples/lightning_module_template.py\n--- a/pl_examples/basic_examples/lightning_module_template.py\n+++ b/pl_examples/basic_examples/lightning_module_template.py\n@@ -250,6 +250,7 @@\n parser.add_argument('--data_root', default=os.path.join(root_dir, 'mnist'), type=str)\n \n # training params (opt)\n+ parser.add_argument('--epochs', default=20, type=int)\n parser.add_argument('--optimizer_name', default='adam', type=str)\n parser.add_argument('--batch_size', default=64, type=int)\n return parser\n", "issue": "Add \"epoch\" options to basic templates\n## \ud83d\ude80 Feature\r\n<!-- A clear and concise description of the feature proposal -->\r\nAdd \"epochs\" option to parser of 'basic_examples/lightning_module_template.py'\r\n\r\n### Motivation\r\n<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->\r\nThanks to 'basic_examples/lightning_module_template.py', I could build my deep learning model. Some beginners like me might build their model from this basic template. However, there are no options to manipulate epochs. I just thought that what people use often should be included in the basic template, so I uploaded my issue.\r\n\r\n### Pitch\r\n<!-- A clear and concise description of what you want to happen. -->\r\nI suggest that the basic template includes \"epoch\" option in the basic template.\r\n\r\n### Alternatives\r\n<!-- A clear and concise description of any alternative solutions or features you've considered, if any. -->\r\nAdd \"epoch\" options to parser of 'basic_examples/lightning_module_template.py'\r\n```python\r\nparser.add_argument('--epochs', default=10, type=int, metavar='N',\r\n help='number of total epochs to run')\r\n\r\ntrainer = pl.Trainer(max_epochs=hparams.epochs)\r\n```\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\nI am really enjoying PytorchLightning framework. 
Thanks \ud83d\ude04 \r\n\n", "before_files": [{"content": "\"\"\"\nExample template for defining a system\n\"\"\"\nimport logging as log\nimport os\nfrom argparse import ArgumentParser\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as transforms\nfrom torch import optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torchvision.datasets import MNIST\n\nimport pytorch_lightning as pl\n\n\nclass LightningTemplateModel(pl.LightningModule):\n \"\"\"\n Sample model to show how to define a template\n \"\"\"\n\n def __init__(self, hparams):\n \"\"\"\n Pass in parsed HyperOptArgumentParser to the model\n :param hparams:\n \"\"\"\n # init superclass\n super(LightningTemplateModel, self).__init__()\n self.hparams = hparams\n\n self.batch_size = hparams.batch_size\n\n # if you specify an example input, the summary will show input/output for each layer\n self.example_input_array = torch.rand(5, 28 * 28)\n\n # build model\n self.__build_model()\n\n # ---------------------\n # MODEL SETUP\n # ---------------------\n def __build_model(self):\n \"\"\"\n Layout model\n :return:\n \"\"\"\n self.c_d1 = nn.Linear(in_features=self.hparams.in_features,\n out_features=self.hparams.hidden_dim)\n self.c_d1_bn = nn.BatchNorm1d(self.hparams.hidden_dim)\n self.c_d1_drop = nn.Dropout(self.hparams.drop_prob)\n\n self.c_d2 = nn.Linear(in_features=self.hparams.hidden_dim,\n out_features=self.hparams.out_features)\n\n # ---------------------\n # TRAINING\n # ---------------------\n def forward(self, x):\n \"\"\"\n No special modification required for lightning, define as you normally would\n :param x:\n :return:\n \"\"\"\n\n x = self.c_d1(x)\n x = torch.tanh(x)\n x = self.c_d1_bn(x)\n x = self.c_d1_drop(x)\n\n x = self.c_d2(x)\n logits = F.log_softmax(x, dim=1)\n\n return logits\n\n def loss(self, labels, logits):\n nll = F.nll_loss(logits, labels)\n return nll\n\n def training_step(self, batch, batch_idx):\n \"\"\"\n Lightning calls this inside the training loop\n :param batch:\n :return:\n \"\"\"\n # forward pass\n x, y = batch\n x = x.view(x.size(0), -1)\n\n y_hat = self.forward(x)\n\n # calculate loss\n loss_val = self.loss(y, y_hat)\n\n # in DP mode (default) make sure if result is scalar, there's another dim in the beginning\n if self.trainer.use_dp or self.trainer.use_ddp2:\n loss_val = loss_val.unsqueeze(0)\n\n tqdm_dict = {'train_loss': loss_val}\n output = OrderedDict({\n 'loss': loss_val,\n 'progress_bar': tqdm_dict,\n 'log': tqdm_dict\n })\n\n # can also return just a scalar instead of a dict (return loss_val)\n return output\n\n def validation_step(self, batch, batch_idx):\n \"\"\"\n Lightning calls this inside the validation loop\n :param batch:\n :return:\n \"\"\"\n x, y = batch\n x = x.view(x.size(0), -1)\n y_hat = self.forward(x)\n\n loss_val = self.loss(y, y_hat)\n\n # acc\n labels_hat = torch.argmax(y_hat, dim=1)\n val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n val_acc = torch.tensor(val_acc)\n\n if self.on_gpu:\n val_acc = val_acc.cuda(loss_val.device.index)\n\n # in DP mode (default) make sure if result is scalar, there's another dim in the beginning\n if self.trainer.use_dp or self.trainer.use_ddp2:\n loss_val = loss_val.unsqueeze(0)\n val_acc = val_acc.unsqueeze(0)\n\n output = OrderedDict({\n 'val_loss': loss_val,\n 'val_acc': val_acc,\n })\n\n # can also return just a scalar instead of a dict (return loss_val)\n return output\n\n 
def validation_end(self, outputs):\n \"\"\"\n Called at the end of validation to aggregate outputs\n :param outputs: list of individual outputs of each validation step\n :return:\n \"\"\"\n # if returned a scalar from validation_step, outputs is a list of tensor scalars\n # we return just the average in this case (if we want)\n # return torch.stack(outputs).mean()\n\n val_loss_mean = 0\n val_acc_mean = 0\n for output in outputs:\n val_loss = output['val_loss']\n\n # reduce manually when using dp\n if self.trainer.use_dp or self.trainer.use_ddp2:\n val_loss = torch.mean(val_loss)\n val_loss_mean += val_loss\n\n # reduce manually when using dp\n val_acc = output['val_acc']\n if self.trainer.use_dp or self.trainer.use_ddp2:\n val_acc = torch.mean(val_acc)\n\n val_acc_mean += val_acc\n\n val_loss_mean /= len(outputs)\n val_acc_mean /= len(outputs)\n tqdm_dict = {'val_loss': val_loss_mean, 'val_acc': val_acc_mean}\n result = {'progress_bar': tqdm_dict, 'log': tqdm_dict, 'val_loss': val_loss_mean}\n return result\n\n # ---------------------\n # TRAINING SETUP\n # ---------------------\n def configure_optimizers(self):\n \"\"\"\n return whatever optimizers we want here\n :return: list of optimizers\n \"\"\"\n optimizer = optim.Adam(self.parameters(), lr=self.hparams.learning_rate)\n scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)\n return [optimizer], [scheduler]\n\n def __dataloader(self, train):\n # init data generators\n transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (1.0,))])\n dataset = MNIST(root=self.hparams.data_root, train=train,\n transform=transform, download=False)\n\n # when using multi-node (ddp) we need to add the datasampler\n batch_size = self.hparams.batch_size\n\n loader = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n num_workers=0\n )\n\n return loader\n\n def prepare_data(self):\n transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5,), (1.0,))])\n dataset = MNIST(root=self.hparams.data_root, train=True,\n transform=transform, download=True)\n dataset = MNIST(root=self.hparams.data_root, train=False,\n transform=transform, download=True)\n\n def train_dataloader(self):\n log.info('Training data loader called.')\n return self.__dataloader(train=True)\n\n def val_dataloader(self):\n log.info('Validation data loader called.')\n return self.__dataloader(train=False)\n\n def test_dataloader(self):\n log.info('Test data loader called.')\n return self.__dataloader(train=False)\n\n @staticmethod\n def add_model_specific_args(parent_parser, root_dir): # pragma: no cover\n \"\"\"\n Parameters you define here will be available to your model through self.hparams\n :param parent_parser:\n :param root_dir:\n :return:\n \"\"\"\n parser = ArgumentParser(parents=[parent_parser])\n\n # param overwrites\n # parser.set_defaults(gradient_clip_val=5.0)\n\n # network params\n parser.add_argument('--in_features', default=28 * 28, type=int)\n parser.add_argument('--out_features', default=10, type=int)\n # use 500 for CPU, 50000 for GPU to see speed difference\n parser.add_argument('--hidden_dim', default=50000, type=int)\n parser.add_argument('--drop_prob', default=0.2, type=float)\n parser.add_argument('--learning_rate', default=0.001, type=float)\n\n # data\n parser.add_argument('--data_root', default=os.path.join(root_dir, 'mnist'), type=str)\n\n # training params (opt)\n parser.add_argument('--optimizer_name', default='adam', type=str)\n parser.add_argument('--batch_size', default=64, type=int)\n 
return parser\n", "path": "pl_examples/basic_examples/lightning_module_template.py"}]} | 3,373 | 143 |
gh_patches_debug_32480 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-549 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
IterableDataset breaks 1.1 compatibility
A recently introduced feature unfortunately breaks compatibility with Pytorch 1.1.0. 
**Describe the bug**
IterableDataset support, introduced in [issue 323](https://github.com/williamFalcon/pytorch-lightning/issues/323), requires Pytorch 1.2.0+.
**To Reproduce**
In a python environment with Pytorch 1.1.0 do:
import pytorch_lightning
**Expected behavior**
Compatibility with Pytorch 1.1.0. I'm filing it as a bug report rather than a docs fix since the dependency on 1.2.0+ introduced by [issue 323](https://github.com/williamFalcon/pytorch-lightning/issues/323) doesn't seem to be intentional.
</issue>
<code>
[start of pytorch_lightning/trainer/data_loading_mixin.py]
1 import warnings
2
3 import torch.distributed as dist
4 from torch.utils.data import IterableDataset
5 from torch.utils.data.distributed import DistributedSampler
6
7 from pytorch_lightning.utilities.debugging import MisconfigurationException
8
9 try:
10 from apex import amp
11
12 APEX_AVAILABLE = True
13 except ImportError:
14 APEX_AVAILABLE = False
15
16
17 class TrainerDataLoadingMixin(object):
18 def init_train_dataloader(self, model):
19 """
20 Dataloaders are provided by the model
21 :param model:
22 :return:
23 """
24 self.get_train_dataloader = model.train_dataloader
25
26 # determine number of training batches
27 if isinstance(self.get_train_dataloader().dataset, IterableDataset):
28 self.nb_training_batches = float('inf')
29 else:
30 self.nb_training_batches = len(self.get_train_dataloader())
31 self.nb_training_batches = int(self.nb_training_batches * self.train_percent_check)
32
33 # determine when to check validation
34 # if int passed in, val checks that often
35 # otherwise, it checks in [0, 1.0] % range of a training epoch
36 if isinstance(self.val_check_interval, int):
37 self.val_check_batch = self.val_check_interval
38 else:
39 self.val_check_batch = int(self.nb_training_batches * self.val_check_interval)
40 self.val_check_batch = max(1, self.val_check_batch)
41
42 on_ddp = self.use_ddp or self.use_ddp2
43 if on_ddp and not isinstance(self.get_train_dataloader().sampler, DistributedSampler):
44 msg = """
45 You're using multiple gpus and multiple nodes without using a DistributedSampler
46 to assign a subset of your data to each process. To silence this warning, pass a
47 DistributedSampler to your DataLoader.
48
49 ie: this:
50 dataset = myDataset()
51 dataloader = Dataloader(dataset)
52
53 becomes:
54 dataset = myDataset()
55 dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
56 dataloader = Dataloader(dataset, sampler=dist_sampler)
57
58 If you want each process to load the full dataset, ignore this warning.
59 """
60 if msg not in self.shown_warnings and self.proc_rank == 0:
61 self.shown_warnings.add(msg)
62 warnings.warn(msg)
63
64 def init_val_dataloader(self, model):
65 """
66 Dataloaders are provided by the model
67 :param model:
68 :return:
69 """
70 self.get_val_dataloaders = model.val_dataloader
71
72 # determine number of validation batches
73 # val datasets could be none, 1 or 2+
74 if self.get_val_dataloaders() is not None:
75 self.nb_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders())
76 self.nb_val_batches = int(self.nb_val_batches * self.val_percent_check)
77 self.nb_val_batches = max(1, self.nb_val_batches)
78
79 on_ddp = self.use_ddp or self.use_ddp2
80 if on_ddp and self.get_val_dataloaders() is not None:
81 for dataloader in self.get_val_dataloaders():
82 if not isinstance(dataloader.sampler, DistributedSampler):
83 msg = """
84 Your val_dataloader(s) don't use DistributedSampler.
85
86 You're using multiple gpus and multiple nodes without using a
87 DistributedSampler to assign a subset of your data to each process.
88 To silence this warning, pass a DistributedSampler to your DataLoader.
89
90 ie: this:
91 dataset = myDataset()
92 dataloader = Dataloader(dataset)
93
94 becomes:
95 dataset = myDataset()
96 dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
97 dataloader = Dataloader(dataset, sampler=dist_sampler)
98
99 If you want each process to load the full dataset, ignore this warning.
100 """
101 if msg not in self.shown_warnings and self.proc_rank == 0:
102 self.shown_warnings.add(msg)
103 warnings.warn(msg)
104 break
105
106 def init_test_dataloader(self, model):
107 """
108 Dataloaders are provided by the model
109 :param model:
110 :return:
111 """
112
113 self.get_test_dataloaders = model.test_dataloader
114
115 # determine number of test batches
116 if self.get_test_dataloaders() is not None:
117 len_sum = sum(len(dataloader) for dataloader in self.get_test_dataloaders())
118 self.nb_test_batches = len_sum
119 self.nb_test_batches = int(self.nb_test_batches * self.test_percent_check)
120 self.nb_test_batches = max(1, self.nb_test_batches)
121
122 on_ddp = self.use_ddp or self.use_ddp2
123 if on_ddp and self.get_test_dataloaders() is not None:
124 for dataloader in self.get_test_dataloaders():
125 if not isinstance(dataloader.sampler, DistributedSampler):
126 msg = """
127 Your test_dataloader(s) don't use DistributedSampler.
128
129 You're using multiple gpus and multiple nodes without using a
130 DistributedSampler to assign a subset of your data to each process.
131 To silence this warning, pass a DistributedSampler to your DataLoader.
132
133 ie: this:
134 dataset = myDataset()
135 dataloader = Dataloader(dataset)
136
137 becomes:
138 dataset = myDataset()
139 dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
140 dataloader = Dataloader(dataset, sampler=dist_sampler)
141
142 If you want each process to load the full dataset, ignore this warning.
143 """
144 if msg not in self.shown_warnings and self.proc_rank == 0:
145 self.shown_warnings.add(msg)
146 warnings.warn(msg)
147 break
148
149 def get_dataloaders(self, model):
150 """
151 Dataloaders are provided by the model
152 :param model:
153 :return:
154 """
155
156 self.init_train_dataloader(model)
157 self.init_test_dataloader(model)
158 self.init_val_dataloader(model)
159
160 if self.use_ddp or self.use_ddp2:
161 # wait for all processes to catch up
162 dist.barrier()
163
164 # load each dataloader
165 self.get_train_dataloader()
166 self.get_test_dataloaders()
167 self.get_val_dataloaders()
168
169 # support IterableDataset for train data
170 self.is_iterable_train_dataloader = isinstance(self.get_train_dataloader().dataset, IterableDataset)
171 if self.is_iterable_train_dataloader and not isinstance(self.val_check_interval, int):
172 m = '''
173 When using an iterableDataset for train_dataloader,
174 Trainer(val_check_interval) must be an int.
175 An int k specifies checking validation every k training batches
176 '''
177 raise MisconfigurationException(m)
178
179 def determine_data_use_amount(self, train_percent_check, val_percent_check,
180 test_percent_check, overfit_pct):
181 """
182 Use less data for debugging purposes
183 """
184 self.train_percent_check = train_percent_check
185 self.val_percent_check = val_percent_check
186 self.test_percent_check = test_percent_check
187 if overfit_pct > 0:
188 self.train_percent_check = overfit_pct
189 self.val_percent_check = overfit_pct
190 self.test_percent_check = overfit_pct
191
[end of pytorch_lightning/trainer/data_loading_mixin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_lightning/trainer/data_loading_mixin.py b/pytorch_lightning/trainer/data_loading_mixin.py
--- a/pytorch_lightning/trainer/data_loading_mixin.py
+++ b/pytorch_lightning/trainer/data_loading_mixin.py
@@ -1,7 +1,17 @@
import warnings
import torch.distributed as dist
-from torch.utils.data import IterableDataset
+try:
+ # loading for pyTorch 1.3
+ from torch.utils.data import IterableDataset
+except ImportError:
+ # loading for pyTorch 1.1
+ import torch
+ warnings.warn('Your version of pyTorch %s does not support `IterableDataset`,'
+ ' please upgrade to 1.2+' % torch.__version__, ImportWarning)
+ EXIST_ITER_DATASET = False
+else:
+ EXIST_ITER_DATASET = True
from torch.utils.data.distributed import DistributedSampler
from pytorch_lightning.utilities.debugging import MisconfigurationException
@@ -24,7 +34,7 @@
self.get_train_dataloader = model.train_dataloader
# determine number of training batches
- if isinstance(self.get_train_dataloader().dataset, IterableDataset):
+ if EXIST_ITER_DATASET and isinstance(self.get_train_dataloader().dataset, IterableDataset):
self.nb_training_batches = float('inf')
else:
self.nb_training_batches = len(self.get_train_dataloader())
@@ -167,7 +177,8 @@
self.get_val_dataloaders()
# support IterableDataset for train data
- self.is_iterable_train_dataloader = isinstance(self.get_train_dataloader().dataset, IterableDataset)
+ self.is_iterable_train_dataloader = (
+ EXIST_ITER_DATASET and isinstance(self.get_train_dataloader().dataset, IterableDataset))
if self.is_iterable_train_dataloader and not isinstance(self.val_check_interval, int):
m = '''
When using an iterableDataset for train_dataloader,
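
The core of the patch is the guarded import; a self-contained version of that pattern, runnable on either PyTorch 1.1 or 1.2+, might look like the sketch below (the `is_iterable_dataset` helper at the end is an illustrative addition, not something the patch itself defines):

```python
# Standalone sketch of the version guard used by the patch. Works on
# PyTorch 1.1 (where IterableDataset is missing) and on 1.2+.
import warnings

try:
    # available from PyTorch 1.2 onwards
    from torch.utils.data import IterableDataset
except ImportError:
    import torch

    warnings.warn(
        'Your version of pyTorch %s does not support `IterableDataset`,'
        ' please upgrade to 1.2+' % torch.__version__, ImportWarning)
    EXIST_ITER_DATASET = False
else:
    EXIST_ITER_DATASET = True


def is_iterable_dataset(dataset):
    # Short-circuiting keeps the isinstance() check (and the undefined name)
    # from being evaluated when the import failed on PyTorch 1.1.
    return EXIST_ITER_DATASET and isinstance(dataset, IterableDataset)
```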
| {"golden_diff": "diff --git a/pytorch_lightning/trainer/data_loading_mixin.py b/pytorch_lightning/trainer/data_loading_mixin.py\n--- a/pytorch_lightning/trainer/data_loading_mixin.py\n+++ b/pytorch_lightning/trainer/data_loading_mixin.py\n@@ -1,7 +1,17 @@\n import warnings\n \n import torch.distributed as dist\n-from torch.utils.data import IterableDataset\n+try:\n+ # loading for pyTorch 1.3\n+ from torch.utils.data import IterableDataset\n+except ImportError:\n+ # loading for pyTorch 1.1\n+ import torch\n+ warnings.warn('Your version of pyTorch %s does not support `IterableDataset`,'\n+ ' please upgrade to 1.2+' % torch.__version__, ImportWarning)\n+ EXIST_ITER_DATASET = False\n+else:\n+ EXIST_ITER_DATASET = True\n from torch.utils.data.distributed import DistributedSampler\n \n from pytorch_lightning.utilities.debugging import MisconfigurationException\n@@ -24,7 +34,7 @@\n self.get_train_dataloader = model.train_dataloader\n \n # determine number of training batches\n- if isinstance(self.get_train_dataloader().dataset, IterableDataset):\n+ if EXIST_ITER_DATASET and isinstance(self.get_train_dataloader().dataset, IterableDataset):\n self.nb_training_batches = float('inf')\n else:\n self.nb_training_batches = len(self.get_train_dataloader())\n@@ -167,7 +177,8 @@\n self.get_val_dataloaders()\n \n # support IterableDataset for train data\n- self.is_iterable_train_dataloader = isinstance(self.get_train_dataloader().dataset, IterableDataset)\n+ self.is_iterable_train_dataloader = (\n+ EXIST_ITER_DATASET and isinstance(self.get_train_dataloader().dataset, IterableDataset))\n if self.is_iterable_train_dataloader and not isinstance(self.val_check_interval, int):\n m = '''\n When using an iterableDataset for train_dataloader,\n", "issue": "IterableDataset breaks 1.1 compatibility\nA recently introduced feature unfortunately breaks compability with Pytorch 1.1.0. \r\n\r\n**Describe the bug**\r\nIterableDataset support, introduced in [issue 323](https://github.com/williamFalcon/pytorch-lightning/issues/323), requires Pytorch 1.2.0+. \r\n\r\n**To Reproduce**\r\nIn a python environment with Pytorch 1.1.0 do:\r\nimport pytorch_lightning\r\n\r\n**Expected behavior**\r\nCompatibility with Pytorch 1.1.0. 
I'm filing it as a bug report rather than a docs fix since the dependency on 1.2.0+ introduced by [issue 323](https://github.com/williamFalcon/pytorch-lightning/issues/323) doesn't seem to be intentional.\r\n\r\n\r\n\n", "before_files": [{"content": "import warnings\n\nimport torch.distributed as dist\nfrom torch.utils.data import IterableDataset\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom pytorch_lightning.utilities.debugging import MisconfigurationException\n\ntry:\n from apex import amp\n\n APEX_AVAILABLE = True\nexcept ImportError:\n APEX_AVAILABLE = False\n\n\nclass TrainerDataLoadingMixin(object):\n def init_train_dataloader(self, model):\n \"\"\"\n Dataloaders are provided by the model\n :param model:\n :return:\n \"\"\"\n self.get_train_dataloader = model.train_dataloader\n\n # determine number of training batches\n if isinstance(self.get_train_dataloader().dataset, IterableDataset):\n self.nb_training_batches = float('inf')\n else:\n self.nb_training_batches = len(self.get_train_dataloader())\n self.nb_training_batches = int(self.nb_training_batches * self.train_percent_check)\n\n # determine when to check validation\n # if int passed in, val checks that often\n # otherwise, it checks in [0, 1.0] % range of a training epoch\n if isinstance(self.val_check_interval, int):\n self.val_check_batch = self.val_check_interval\n else:\n self.val_check_batch = int(self.nb_training_batches * self.val_check_interval)\n self.val_check_batch = max(1, self.val_check_batch)\n\n on_ddp = self.use_ddp or self.use_ddp2\n if on_ddp and not isinstance(self.get_train_dataloader().sampler, DistributedSampler):\n msg = \"\"\"\n You're using multiple gpus and multiple nodes without using a DistributedSampler\n to assign a subset of your data to each process. 
To silence this warning, pass a\n DistributedSampler to your DataLoader.\n\n ie: this:\n dataset = myDataset()\n dataloader = Dataloader(dataset)\n\n becomes:\n dataset = myDataset()\n dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n dataloader = Dataloader(dataset, sampler=dist_sampler)\n\n If you want each process to load the full dataset, ignore this warning.\n \"\"\"\n if msg not in self.shown_warnings and self.proc_rank == 0:\n self.shown_warnings.add(msg)\n warnings.warn(msg)\n\n def init_val_dataloader(self, model):\n \"\"\"\n Dataloaders are provided by the model\n :param model:\n :return:\n \"\"\"\n self.get_val_dataloaders = model.val_dataloader\n\n # determine number of validation batches\n # val datasets could be none, 1 or 2+\n if self.get_val_dataloaders() is not None:\n self.nb_val_batches = sum(len(dataloader) for dataloader in self.get_val_dataloaders())\n self.nb_val_batches = int(self.nb_val_batches * self.val_percent_check)\n self.nb_val_batches = max(1, self.nb_val_batches)\n\n on_ddp = self.use_ddp or self.use_ddp2\n if on_ddp and self.get_val_dataloaders() is not None:\n for dataloader in self.get_val_dataloaders():\n if not isinstance(dataloader.sampler, DistributedSampler):\n msg = \"\"\"\n Your val_dataloader(s) don't use DistributedSampler.\n\n You're using multiple gpus and multiple nodes without using a\n DistributedSampler to assign a subset of your data to each process.\n To silence this warning, pass a DistributedSampler to your DataLoader.\n\n ie: this:\n dataset = myDataset()\n dataloader = Dataloader(dataset)\n\n becomes:\n dataset = myDataset()\n dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n dataloader = Dataloader(dataset, sampler=dist_sampler)\n\n If you want each process to load the full dataset, ignore this warning.\n \"\"\"\n if msg not in self.shown_warnings and self.proc_rank == 0:\n self.shown_warnings.add(msg)\n warnings.warn(msg)\n break\n\n def init_test_dataloader(self, model):\n \"\"\"\n Dataloaders are provided by the model\n :param model:\n :return:\n \"\"\"\n\n self.get_test_dataloaders = model.test_dataloader\n\n # determine number of test batches\n if self.get_test_dataloaders() is not None:\n len_sum = sum(len(dataloader) for dataloader in self.get_test_dataloaders())\n self.nb_test_batches = len_sum\n self.nb_test_batches = int(self.nb_test_batches * self.test_percent_check)\n self.nb_test_batches = max(1, self.nb_test_batches)\n\n on_ddp = self.use_ddp or self.use_ddp2\n if on_ddp and self.get_test_dataloaders() is not None:\n for dataloader in self.get_test_dataloaders():\n if not isinstance(dataloader.sampler, DistributedSampler):\n msg = \"\"\"\n Your test_dataloader(s) don't use DistributedSampler.\n\n You're using multiple gpus and multiple nodes without using a\n DistributedSampler to assign a subset of your data to each process.\n To silence this warning, pass a DistributedSampler to your DataLoader.\n\n ie: this:\n dataset = myDataset()\n dataloader = Dataloader(dataset)\n\n becomes:\n dataset = myDataset()\n dist_sampler = torch.utils.data.distributed.DistributedSampler(dataset)\n dataloader = Dataloader(dataset, sampler=dist_sampler)\n\n If you want each process to load the full dataset, ignore this warning.\n \"\"\"\n if msg not in self.shown_warnings and self.proc_rank == 0:\n self.shown_warnings.add(msg)\n warnings.warn(msg)\n break\n\n def get_dataloaders(self, model):\n \"\"\"\n Dataloaders are provided by the model\n :param model:\n :return:\n \"\"\"\n\n 
self.init_train_dataloader(model)\n self.init_test_dataloader(model)\n self.init_val_dataloader(model)\n\n if self.use_ddp or self.use_ddp2:\n # wait for all processes to catch up\n dist.barrier()\n\n # load each dataloader\n self.get_train_dataloader()\n self.get_test_dataloaders()\n self.get_val_dataloaders()\n\n # support IterableDataset for train data\n self.is_iterable_train_dataloader = isinstance(self.get_train_dataloader().dataset, IterableDataset)\n if self.is_iterable_train_dataloader and not isinstance(self.val_check_interval, int):\n m = '''\n When using an iterableDataset for train_dataloader,\n Trainer(val_check_interval) must be an int.\n An int k specifies checking validation every k training batches\n '''\n raise MisconfigurationException(m)\n\n def determine_data_use_amount(self, train_percent_check, val_percent_check,\n test_percent_check, overfit_pct):\n \"\"\"\n Use less data for debugging purposes\n \"\"\"\n self.train_percent_check = train_percent_check\n self.val_percent_check = val_percent_check\n self.test_percent_check = test_percent_check\n if overfit_pct > 0:\n self.train_percent_check = overfit_pct\n self.val_percent_check = overfit_pct\n self.test_percent_check = overfit_pct\n", "path": "pytorch_lightning/trainer/data_loading_mixin.py"}]} | 2,744 | 433 |
gh_patches_debug_58044 | rasdani/github-patches | git_diff | PyGithub__PyGithub-2084 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug in calculating totalCount of review requests
I want to get the count of all review requests (users and teams), but there is a problem when using the following code:
```
for pull in repo.get_pulls(state='all'):
    review_reqs = pull.get_review_requests()
    users = review_reqs[0]
    teams = review_reqs[1]
    print(users.totalCount)
    print(teams.totalCount)
```
It always prints `2` for both the users count and the teams count, no matter what the actual counts are.
For example, in the following case you can see that both the users list and the teams list are empty, yet `totalCount` returns 2 for both:

</issue>
<code>
[start of github/PaginatedList.py]
1 ############################ Copyrights and license ############################
2 # #
3 # Copyright 2012 Vincent Jacques <[email protected]> #
4 # Copyright 2012 Zearin <[email protected]> #
5 # Copyright 2013 AKFish <[email protected]> #
6 # Copyright 2013 Bill Mill <[email protected]> #
7 # Copyright 2013 Vincent Jacques <[email protected]> #
8 # Copyright 2013 davidbrai <[email protected]> #
9 # Copyright 2014 Thialfihar <[email protected]> #
10 # Copyright 2014 Vincent Jacques <[email protected]> #
11 # Copyright 2015 Dan Vanderkam <[email protected]> #
12 # Copyright 2015 Eliot Walker <[email protected]> #
13 # Copyright 2016 Peter Buckley <[email protected]> #
14 # Copyright 2017 Jannis Gebauer <[email protected]> #
15 # Copyright 2018 Gilad Shefer <[email protected]> #
16 # Copyright 2018 Joel Koglin <[email protected]> #
17 # Copyright 2018 Wan Liuyang <[email protected]> #
18 # Copyright 2018 sfdye <[email protected]> #
19 # #
20 # This file is part of PyGithub. #
21 # http://pygithub.readthedocs.io/ #
22 # #
23 # PyGithub is free software: you can redistribute it and/or modify it under #
24 # the terms of the GNU Lesser General Public License as published by the Free #
25 # Software Foundation, either version 3 of the License, or (at your option) #
26 # any later version. #
27 # #
28 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
29 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
30 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
31 # details. #
32 # #
33 # You should have received a copy of the GNU Lesser General Public License #
34 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
35 # #
36 ################################################################################
37
38 from urllib.parse import parse_qs
39
40
41 class PaginatedListBase:
42 def __init__(self):
43 self.__elements = list()
44
45 def __getitem__(self, index):
46 assert isinstance(index, (int, slice))
47 if isinstance(index, int):
48 self.__fetchToIndex(index)
49 return self.__elements[index]
50 else:
51 return self._Slice(self, index)
52
53 def __iter__(self):
54 yield from self.__elements
55 while self._couldGrow():
56 newElements = self._grow()
57 yield from newElements
58
59 def _isBiggerThan(self, index):
60 return len(self.__elements) > index or self._couldGrow()
61
62 def __fetchToIndex(self, index):
63 while len(self.__elements) <= index and self._couldGrow():
64 self._grow()
65
66 def _grow(self):
67 newElements = self._fetchNextPage()
68 self.__elements += newElements
69 return newElements
70
71 class _Slice:
72 def __init__(self, theList, theSlice):
73 self.__list = theList
74 self.__start = theSlice.start or 0
75 self.__stop = theSlice.stop
76 self.__step = theSlice.step or 1
77
78 def __iter__(self):
79 index = self.__start
80 while not self.__finished(index):
81 if self.__list._isBiggerThan(index):
82 yield self.__list[index]
83 index += self.__step
84 else:
85 return
86
87 def __finished(self, index):
88 return self.__stop is not None and index >= self.__stop
89
90
91 class PaginatedList(PaginatedListBase):
92 """
93 This class abstracts the `pagination of the API <https://docs.github.com/en/rest/guides/traversing-with-pagination>`_.
94
95 You can simply enumerate through instances of this class::
96
97 for repo in user.get_repos():
98 print(repo.name)
99
100 If you want to know the total number of items in the list::
101
102 print(user.get_repos().totalCount)
103
104 You can also index them or take slices::
105
106 second_repo = user.get_repos()[1]
107 first_repos = user.get_repos()[:10]
108
109 If you want to iterate in reversed order, just do::
110
111 for repo in user.get_repos().reversed:
112 print(repo.name)
113
114 And if you really need it, you can explicitly access a specific page::
115
116 some_repos = user.get_repos().get_page(0)
117 some_other_repos = user.get_repos().get_page(3)
118 """
119
120 def __init__(
121 self,
122 contentClass,
123 requester,
124 firstUrl,
125 firstParams,
126 headers=None,
127 list_item="items",
128 ):
129 super().__init__()
130 self.__requester = requester
131 self.__contentClass = contentClass
132 self.__firstUrl = firstUrl
133 self.__firstParams = firstParams or ()
134 self.__nextUrl = firstUrl
135 self.__nextParams = firstParams or {}
136 self.__headers = headers
137 self.__list_item = list_item
138 if self.__requester.per_page != 30:
139 self.__nextParams["per_page"] = self.__requester.per_page
140 self._reversed = False
141 self.__totalCount = None
142
143 @property
144 def totalCount(self):
145 if not self.__totalCount:
146 params = {} if self.__nextParams is None else self.__nextParams.copy()
147 # set per_page = 1 so the totalCount is just the number of pages
148 params.update({"per_page": 1})
149 headers, data = self.__requester.requestJsonAndCheck(
150 "GET", self.__firstUrl, parameters=params, headers=self.__headers
151 )
152 if "link" not in headers:
153 if data and "total_count" in data:
154 self.__totalCount = data["total_count"]
155 elif data:
156 self.__totalCount = len(data)
157 else:
158 self.__totalCount = 0
159 else:
160 links = self.__parseLinkHeader(headers)
161 lastUrl = links.get("last")
162 if lastUrl:
163 self.__totalCount = int(parse_qs(lastUrl)["page"][0])
164 else:
165 self.__totalCount = 0
166 return self.__totalCount
167
168 def _getLastPageUrl(self):
169 headers, data = self.__requester.requestJsonAndCheck(
170 "GET", self.__firstUrl, parameters=self.__nextParams, headers=self.__headers
171 )
172 links = self.__parseLinkHeader(headers)
173 lastUrl = links.get("last")
174 return lastUrl
175
176 @property
177 def reversed(self):
178 r = PaginatedList(
179 self.__contentClass,
180 self.__requester,
181 self.__firstUrl,
182 self.__firstParams,
183 self.__headers,
184 self.__list_item,
185 )
186 r.__reverse()
187 return r
188
189 def __reverse(self):
190 self._reversed = True
191 lastUrl = self._getLastPageUrl()
192 if lastUrl:
193 self.__nextUrl = lastUrl
194
195 def _couldGrow(self):
196 return self.__nextUrl is not None
197
198 def _fetchNextPage(self):
199 headers, data = self.__requester.requestJsonAndCheck(
200 "GET", self.__nextUrl, parameters=self.__nextParams, headers=self.__headers
201 )
202 data = data if data else []
203
204 self.__nextUrl = None
205 if len(data) > 0:
206 links = self.__parseLinkHeader(headers)
207 if self._reversed:
208 if "prev" in links:
209 self.__nextUrl = links["prev"]
210 elif "next" in links:
211 self.__nextUrl = links["next"]
212 self.__nextParams = None
213
214 if self.__list_item in data:
215 self.__totalCount = data.get("total_count")
216 data = data[self.__list_item]
217
218 content = [
219 self.__contentClass(self.__requester, headers, element, completed=False)
220 for element in data
221 if element is not None
222 ]
223 if self._reversed:
224 return content[::-1]
225 return content
226
227 def __parseLinkHeader(self, headers):
228 links = {}
229 if "link" in headers:
230 linkHeaders = headers["link"].split(", ")
231 for linkHeader in linkHeaders:
232 url, rel, *rest = linkHeader.split("; ")
233 url = url[1:-1]
234 rel = rel[5:-1]
235 links[rel] = url
236 return links
237
238 def get_page(self, page):
239 params = dict(self.__firstParams)
240 if page != 0:
241 params["page"] = page + 1
242 if self.__requester.per_page != 30:
243 params["per_page"] = self.__requester.per_page
244 headers, data = self.__requester.requestJsonAndCheck(
245 "GET", self.__firstUrl, parameters=params, headers=self.__headers
246 )
247
248 if self.__list_item in data:
249 self.__totalCount = data.get("total_count")
250 data = data[self.__list_item]
251
252 return [
253 self.__contentClass(self.__requester, headers, element, completed=False)
254 for element in data
255 ]
256
[end of github/PaginatedList.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/github/PaginatedList.py b/github/PaginatedList.py
--- a/github/PaginatedList.py
+++ b/github/PaginatedList.py
@@ -153,6 +153,8 @@
if data and "total_count" in data:
self.__totalCount = data["total_count"]
elif data:
+ if isinstance(data, dict):
+ data = data[self.__list_item]
self.__totalCount = len(data)
else:
self.__totalCount = 0
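
A short note on why the two added lines fix the reported count (my reading of the diff above, illustrated with plain Python rather than real API calls; the payload shapes are examples): some endpoints wrap the result list in an object, e.g. `{"users": [...], "teams": [...]}` for review requests, so `len(data)` counts the dict's keys (always 2) instead of the items. The guard unwraps the named list before counting.

```python
# Illustration only: hypothetical payloads, not actual GitHub responses.
wrapped = {"users": [], "teams": []}        # dict-wrapped response body
plain = [{"id": 1}, {"id": 2}, {"id": 3}]   # plain list response body

def total_count(data, list_item="users"):
    if isinstance(data, dict):
        data = data[list_item]  # unwrap the named list before counting
    return len(data)

assert total_count(wrapped) == 0  # before the fix this returned 2 (dict keys)
assert total_count(plain) == 3
```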
| {"golden_diff": "diff --git a/github/PaginatedList.py b/github/PaginatedList.py\n--- a/github/PaginatedList.py\n+++ b/github/PaginatedList.py\n@@ -153,6 +153,8 @@\n if data and \"total_count\" in data:\n self.__totalCount = data[\"total_count\"]\n elif data:\n+ if isinstance(data, dict):\n+ data = data[self.__list_item]\n self.__totalCount = len(data)\n else:\n self.__totalCount = 0\n", "issue": "Bug in calculating totalCount of review requests\nI want to get the count of all review requests (users and teams), but there is a problem when using the following code:\r\n\r\n```\r\nfor pull in repo.get_pulls(state='all'):\r\n review_reqs = pull.get_review_requests()\r\n users = review_reqs[0]\r\n teams = review_reqs[1]\r\n print(users.totalCount)\r\n print(teams.totalCount)\r\n```\r\nIt always prints `2` for the count of users and teams no matter what is the actual count of them.\r\n\r\nFor example, in the following case, you can see that both users and teams list is empty but again totalCount returns 2 for both of them:\r\n\r\n\r\n\n", "before_files": [{"content": "############################ Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Bill Mill <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2013 davidbrai <[email protected]> #\n# Copyright 2014 Thialfihar <[email protected]> #\n# Copyright 2014 Vincent Jacques <[email protected]> #\n# Copyright 2015 Dan Vanderkam <[email protected]> #\n# Copyright 2015 Eliot Walker <[email protected]> #\n# Copyright 2016 Peter Buckley <[email protected]> #\n# Copyright 2017 Jannis Gebauer <[email protected]> #\n# Copyright 2018 Gilad Shefer <[email protected]> #\n# Copyright 2018 Joel Koglin <[email protected]> #\n# Copyright 2018 Wan Liuyang <[email protected]> #\n# Copyright 2018 sfdye <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. 
#\n# #\n################################################################################\n\nfrom urllib.parse import parse_qs\n\n\nclass PaginatedListBase:\n def __init__(self):\n self.__elements = list()\n\n def __getitem__(self, index):\n assert isinstance(index, (int, slice))\n if isinstance(index, int):\n self.__fetchToIndex(index)\n return self.__elements[index]\n else:\n return self._Slice(self, index)\n\n def __iter__(self):\n yield from self.__elements\n while self._couldGrow():\n newElements = self._grow()\n yield from newElements\n\n def _isBiggerThan(self, index):\n return len(self.__elements) > index or self._couldGrow()\n\n def __fetchToIndex(self, index):\n while len(self.__elements) <= index and self._couldGrow():\n self._grow()\n\n def _grow(self):\n newElements = self._fetchNextPage()\n self.__elements += newElements\n return newElements\n\n class _Slice:\n def __init__(self, theList, theSlice):\n self.__list = theList\n self.__start = theSlice.start or 0\n self.__stop = theSlice.stop\n self.__step = theSlice.step or 1\n\n def __iter__(self):\n index = self.__start\n while not self.__finished(index):\n if self.__list._isBiggerThan(index):\n yield self.__list[index]\n index += self.__step\n else:\n return\n\n def __finished(self, index):\n return self.__stop is not None and index >= self.__stop\n\n\nclass PaginatedList(PaginatedListBase):\n \"\"\"\n This class abstracts the `pagination of the API <https://docs.github.com/en/rest/guides/traversing-with-pagination>`_.\n\n You can simply enumerate through instances of this class::\n\n for repo in user.get_repos():\n print(repo.name)\n\n If you want to know the total number of items in the list::\n\n print(user.get_repos().totalCount)\n\n You can also index them or take slices::\n\n second_repo = user.get_repos()[1]\n first_repos = user.get_repos()[:10]\n\n If you want to iterate in reversed order, just do::\n\n for repo in user.get_repos().reversed:\n print(repo.name)\n\n And if you really need it, you can explicitly access a specific page::\n\n some_repos = user.get_repos().get_page(0)\n some_other_repos = user.get_repos().get_page(3)\n \"\"\"\n\n def __init__(\n self,\n contentClass,\n requester,\n firstUrl,\n firstParams,\n headers=None,\n list_item=\"items\",\n ):\n super().__init__()\n self.__requester = requester\n self.__contentClass = contentClass\n self.__firstUrl = firstUrl\n self.__firstParams = firstParams or ()\n self.__nextUrl = firstUrl\n self.__nextParams = firstParams or {}\n self.__headers = headers\n self.__list_item = list_item\n if self.__requester.per_page != 30:\n self.__nextParams[\"per_page\"] = self.__requester.per_page\n self._reversed = False\n self.__totalCount = None\n\n @property\n def totalCount(self):\n if not self.__totalCount:\n params = {} if self.__nextParams is None else self.__nextParams.copy()\n # set per_page = 1 so the totalCount is just the number of pages\n params.update({\"per_page\": 1})\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\", self.__firstUrl, parameters=params, headers=self.__headers\n )\n if \"link\" not in headers:\n if data and \"total_count\" in data:\n self.__totalCount = data[\"total_count\"]\n elif data:\n self.__totalCount = len(data)\n else:\n self.__totalCount = 0\n else:\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n if lastUrl:\n self.__totalCount = int(parse_qs(lastUrl)[\"page\"][0])\n else:\n self.__totalCount = 0\n return self.__totalCount\n\n def _getLastPageUrl(self):\n headers, data = 
self.__requester.requestJsonAndCheck(\n \"GET\", self.__firstUrl, parameters=self.__nextParams, headers=self.__headers\n )\n links = self.__parseLinkHeader(headers)\n lastUrl = links.get(\"last\")\n return lastUrl\n\n @property\n def reversed(self):\n r = PaginatedList(\n self.__contentClass,\n self.__requester,\n self.__firstUrl,\n self.__firstParams,\n self.__headers,\n self.__list_item,\n )\n r.__reverse()\n return r\n\n def __reverse(self):\n self._reversed = True\n lastUrl = self._getLastPageUrl()\n if lastUrl:\n self.__nextUrl = lastUrl\n\n def _couldGrow(self):\n return self.__nextUrl is not None\n\n def _fetchNextPage(self):\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\", self.__nextUrl, parameters=self.__nextParams, headers=self.__headers\n )\n data = data if data else []\n\n self.__nextUrl = None\n if len(data) > 0:\n links = self.__parseLinkHeader(headers)\n if self._reversed:\n if \"prev\" in links:\n self.__nextUrl = links[\"prev\"]\n elif \"next\" in links:\n self.__nextUrl = links[\"next\"]\n self.__nextParams = None\n\n if self.__list_item in data:\n self.__totalCount = data.get(\"total_count\")\n data = data[self.__list_item]\n\n content = [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data\n if element is not None\n ]\n if self._reversed:\n return content[::-1]\n return content\n\n def __parseLinkHeader(self, headers):\n links = {}\n if \"link\" in headers:\n linkHeaders = headers[\"link\"].split(\", \")\n for linkHeader in linkHeaders:\n url, rel, *rest = linkHeader.split(\"; \")\n url = url[1:-1]\n rel = rel[5:-1]\n links[rel] = url\n return links\n\n def get_page(self, page):\n params = dict(self.__firstParams)\n if page != 0:\n params[\"page\"] = page + 1\n if self.__requester.per_page != 30:\n params[\"per_page\"] = self.__requester.per_page\n headers, data = self.__requester.requestJsonAndCheck(\n \"GET\", self.__firstUrl, parameters=params, headers=self.__headers\n )\n\n if self.__list_item in data:\n self.__totalCount = data.get(\"total_count\")\n data = data[self.__list_item]\n\n return [\n self.__contentClass(self.__requester, headers, element, completed=False)\n for element in data\n ]\n", "path": "github/PaginatedList.py"}]} | 3,513 | 112 |
gh_patches_debug_17055 | rasdani/github-patches | git_diff | svthalia__concrexit-1680 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show non-current FoodEvents in API v2.
### Describe the bug
The `api/v2/food/events/` and `api/v2/food/events/<pk>/` endpoints currently do not return FoodEvents that are not current.
I think to change that we’d only need to replace some `FoodEvent.current_objects.all()`s with `FoodEvent.objects.all()`.
</issue>
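Before the code listing, a minimal sketch of the one-line change the issue proposes (class and manager names are taken from the views below; this is orientation only, and the diff actually adopted appears at the end of the entry):

```python
# Sketch: use the default manager so past food events are also returned.
queryset = FoodEvent.objects.all()  # was: FoodEvent.current_objects.all()
```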
<code>
[start of website/pizzas/api/v2/views.py]
1 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope
2 from rest_framework.generics import (
3 ListAPIView,
4 RetrieveAPIView,
5 get_object_or_404,
6 CreateAPIView,
7 DestroyAPIView,
8 UpdateAPIView,
9 )
10
11 from rest_framework import filters as framework_filters, status
12 from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly
13 from rest_framework.response import Response
14
15 from pizzas.api.v2 import filters
16 from pizzas.api.v2.serializers import (
17 ProductSerializer,
18 FoodOrderSerializer,
19 FoodOrderUpdateSerializer,
20 FoodOrderCreateSerializer,
21 )
22 from pizzas.api.v2.serializers.food_event import FoodEventSerializer
23 from pizzas.models import FoodEvent, Product, FoodOrder
24 from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod
25
26
27 class FoodEventListView(ListAPIView):
28 """Returns an overview of all food events."""
29
30 serializer_class = FoodEventSerializer
31 queryset = FoodEvent.current_objects.all()
32 filter_backends = (
33 framework_filters.OrderingFilter,
34 filters.FoodEventDateFilterBackend,
35 )
36 ordering_fields = ("start", "end")
37 permission_classes = [
38 IsAuthenticatedOrTokenHasScope,
39 DjangoModelPermissionsOrAnonReadOnly,
40 ]
41 required_scopes = ["food:read"]
42
43
44 class FoodEventDetailView(RetrieveAPIView):
45 """Returns one single food event."""
46
47 serializer_class = FoodEventSerializer
48 queryset = FoodEvent.current_objects.all()
49 permission_classes = [
50 IsAuthenticatedOrTokenHasScope,
51 DjangoModelPermissionsOrAnonReadOnly,
52 ]
53 required_scopes = ["food:read"]
54
55
56 class FoodEventProductsListView(ListAPIView):
57 """Returns an overview of all products."""
58
59 serializer_class = ProductSerializer
60 queryset = Product.available_products.all()
61 filter_backends = (framework_filters.SearchFilter,)
62 search_fields = ("name",)
63 permission_classes = [
64 IsAuthenticatedOrTokenHasScope,
65 DjangoModelPermissionsOrAnonReadOnly,
66 ]
67 required_scopes = ["food:read"]
68
69
70 class FoodEventOrderDetailView(
71 RetrieveAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView
72 ):
73 """Returns details of a food order."""
74
75 permission_classes = [
76 IsAuthenticatedOrTokenHasScopeForMethod,
77 DjangoModelPermissionsOrAnonReadOnly,
78 ]
79 required_scopes_per_method = {
80 "GET": ["food:read"],
81 "POST": ["food:order"],
82 "PUT": ["food:order"],
83 "PATCH": ["food:order"],
84 "DELETE": ["food:order"],
85 }
86
87 def get_serializer_class(self):
88 if self.request.method.lower() == "get":
89 return FoodOrderSerializer
90 if self.request.method.lower() == "post":
91 return FoodOrderCreateSerializer
92 return FoodOrderUpdateSerializer
93
94 def get_queryset(self):
95 return FoodOrder.objects.filter(food_event=self.food_event)
96
97 def get_object(self):
98 queryset = self.filter_queryset(self.get_queryset())
99 obj = get_object_or_404(queryset, member=self.request.member)
100
101 # May raise a permission denied
102 self.check_object_permissions(self.request, obj)
103
104 return obj
105
106 def dispatch(self, request, *args, **kwargs):
107 self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get("pk"))
108 return super().dispatch(request, *args, **kwargs)
109
110 def update(self, request, *args, **kwargs):
111 super().update(request, *args, **kwargs)
112 instance = self.get_object()
113 return Response(
114 FoodOrderSerializer(instance, context=self.get_serializer_context()).data
115 )
116
117 def create(self, request, *args, **kwargs):
118 serializer = self.get_serializer(data=request.data)
119 serializer.is_valid(raise_exception=True)
120 instance = serializer.save(food_event=self.food_event)
121 return Response(
122 FoodOrderSerializer(instance, context=self.get_serializer_context()).data,
123 status=status.HTTP_201_CREATED,
124 )
125
[end of website/pizzas/api/v2/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/pizzas/api/v2/views.py b/website/pizzas/api/v2/views.py
--- a/website/pizzas/api/v2/views.py
+++ b/website/pizzas/api/v2/views.py
@@ -28,7 +28,7 @@
"""Returns an overview of all food events."""
serializer_class = FoodEventSerializer
- queryset = FoodEvent.current_objects.all()
+ queryset = FoodEvent.objects.all()
filter_backends = (
framework_filters.OrderingFilter,
filters.FoodEventDateFilterBackend,
@@ -45,7 +45,7 @@
"""Returns one single food event."""
serializer_class = FoodEventSerializer
- queryset = FoodEvent.current_objects.all()
+ queryset = FoodEvent.objects.all()
permission_classes = [
IsAuthenticatedOrTokenHasScope,
DjangoModelPermissionsOrAnonReadOnly,
| {"golden_diff": "diff --git a/website/pizzas/api/v2/views.py b/website/pizzas/api/v2/views.py\n--- a/website/pizzas/api/v2/views.py\n+++ b/website/pizzas/api/v2/views.py\n@@ -28,7 +28,7 @@\n \"\"\"Returns an overview of all food events.\"\"\"\n \n serializer_class = FoodEventSerializer\n- queryset = FoodEvent.current_objects.all()\n+ queryset = FoodEvent.objects.all()\n filter_backends = (\n framework_filters.OrderingFilter,\n filters.FoodEventDateFilterBackend,\n@@ -45,7 +45,7 @@\n \"\"\"Returns one single food event.\"\"\"\n \n serializer_class = FoodEventSerializer\n- queryset = FoodEvent.current_objects.all()\n+ queryset = FoodEvent.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n", "issue": "Show non-current FoodEvents in API v2.\n### Describe the bug\r\nThe `api/v2/food/events/` and `api/v2/food/events/<pk>/` endpoints currently do not return FoodEvents that are not current. \r\n\r\nI think to change that we\u2019d only need to replace some `FoodEvent.current_objects.all()`s with `FoodEvent.objects.all()`.\r\n\n", "before_files": [{"content": "from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework.generics import (\n ListAPIView,\n RetrieveAPIView,\n get_object_or_404,\n CreateAPIView,\n DestroyAPIView,\n UpdateAPIView,\n)\n\nfrom rest_framework import filters as framework_filters, status\nfrom rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly\nfrom rest_framework.response import Response\n\nfrom pizzas.api.v2 import filters\nfrom pizzas.api.v2.serializers import (\n ProductSerializer,\n FoodOrderSerializer,\n FoodOrderUpdateSerializer,\n FoodOrderCreateSerializer,\n)\nfrom pizzas.api.v2.serializers.food_event import FoodEventSerializer\nfrom pizzas.models import FoodEvent, Product, FoodOrder\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n\n\nclass FoodEventListView(ListAPIView):\n \"\"\"Returns an overview of all food events.\"\"\"\n\n serializer_class = FoodEventSerializer\n queryset = FoodEvent.current_objects.all()\n filter_backends = (\n framework_filters.OrderingFilter,\n filters.FoodEventDateFilterBackend,\n )\n ordering_fields = (\"start\", \"end\")\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventDetailView(RetrieveAPIView):\n \"\"\"Returns one single food event.\"\"\"\n\n serializer_class = FoodEventSerializer\n queryset = FoodEvent.current_objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventProductsListView(ListAPIView):\n \"\"\"Returns an overview of all products.\"\"\"\n\n serializer_class = ProductSerializer\n queryset = Product.available_products.all()\n filter_backends = (framework_filters.SearchFilter,)\n search_fields = (\"name\",)\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventOrderDetailView(\n RetrieveAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView\n):\n \"\"\"Returns details of a food order.\"\"\"\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes_per_method = {\n \"GET\": [\"food:read\"],\n \"POST\": [\"food:order\"],\n \"PUT\": [\"food:order\"],\n \"PATCH\": 
[\"food:order\"],\n \"DELETE\": [\"food:order\"],\n }\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"get\":\n return FoodOrderSerializer\n if self.request.method.lower() == \"post\":\n return FoodOrderCreateSerializer\n return FoodOrderUpdateSerializer\n\n def get_queryset(self):\n return FoodOrder.objects.filter(food_event=self.food_event)\n\n def get_object(self):\n queryset = self.filter_queryset(self.get_queryset())\n obj = get_object_or_404(queryset, member=self.request.member)\n\n # May raise a permission denied\n self.check_object_permissions(self.request, obj)\n\n return obj\n\n def dispatch(self, request, *args, **kwargs):\n self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get(\"pk\"))\n return super().dispatch(request, *args, **kwargs)\n\n def update(self, request, *args, **kwargs):\n super().update(request, *args, **kwargs)\n instance = self.get_object()\n return Response(\n FoodOrderSerializer(instance, context=self.get_serializer_context()).data\n )\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n instance = serializer.save(food_event=self.food_event)\n return Response(\n FoodOrderSerializer(instance, context=self.get_serializer_context()).data,\n status=status.HTTP_201_CREATED,\n )\n", "path": "website/pizzas/api/v2/views.py"}]} | 1,739 | 190 |
gh_patches_debug_30501 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-306 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Calendar used as a trigger for automations never fires.
So I created an automation that uses the generated calendar to notify me when I have to put the bins out using the new calendar triggers.
However, the automation never runs because the trigger never fires.
I debugged this a bit and found the following issue:
HA asks the integration [here](https://github.com/home-assistant/core/blob/dev/homeassistant/components/calendar/trigger.py#L98) for all applicable events. However, the returned list is not quite correct. The timestamps are datetimes and the integration checks only the date component [here](https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/custom_components/waste_collection_schedule/calendar.py#L53).
In my case, my local timezone is Europe/Berlin, which is currently UTC+2. HA gives UTC timestamps to the integration, so they are shifted by two hours "into the past" (not really, but you know what I mean). This means that the date check is wrong, as it misses the events for the day.
I changed the following and it worked in my testing but maybe you have a better idea on how to fix that:
```python
async def async_get_events(self, hass, start_datetime, end_datetime):
"""Return all events within specified time span."""
collections = []
for a in self._scraper.get_upcoming(include_today=True):
event = self._convert(a)
if event.start_datetime_local >= start_datetime and event.end_datetime_local <= end_datetime:
collections.append(event)
return collections
def _convert(self, collection):
"""Convert an collection into a Home Assistant calendar event."""
return CalendarEvent(
summary=collection.type,
start=collection.date,
end=collection.date,
)
```
Essentially, I convert to an HA calendar event first and then let HA convert the start/end times of the event to local time to compare them against the given start/end times, which are still in UTC. But both are now proper datetime objects with timezone information, so comparing them works fine.
</issue>
<code>
[start of custom_components/waste_collection_schedule/calendar.py]
1 """Calendar platform support for Waste Collection Schedule."""
2
3 import logging
4 from datetime import timedelta
5
6 from homeassistant.components.calendar import CalendarEntity, CalendarEvent
7
8 from custom_components.waste_collection_schedule.waste_collection_schedule.scraper import (
9 Scraper,
10 )
11
12 _LOGGER = logging.getLogger(__name__)
13
14
15 async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
16 """Set up calendar platform."""
17 # We only want this platform to be set up via discovery.
18 if discovery_info is None:
19 return
20
21 entities = []
22
23 api = discovery_info["api"]
24
25 for scraper in api.scrapers:
26 dedicated_calendar_types = scraper.get_dedicated_calendar_types()
27 global_calendar_types = scraper.get_global_calendar_types()
28
29 if dedicated_calendar_types is not None:
30 for type in dedicated_calendar_types:
31 unique_id = calc_unique_calendar_id(scraper, type)
32
33 entities.append(
34 WasteCollectionCalendar(
35 api,
36 scraper,
37 scraper.get_calendar_title_for_type(type),
38 [scraper.get_collection_type(type)],
39 unique_id,
40 )
41 )
42
43 if global_calendar_types is not None or dedicated_calendar_types is None:
44 unique_id = calc_unique_calendar_id(scraper)
45 entities.append(
46 WasteCollectionCalendar(
47 api,
48 scraper,
49 scraper.calendar_title,
50 [
51 scraper.get_collection_type(type)
52 for type in global_calendar_types
53 ]
54 if global_calendar_types is not None
55 else None,
56 unique_id,
57 )
58 )
59
60 async_add_entities(entities)
61
62
63 class WasteCollectionCalendar(CalendarEntity):
64 """Calendar entity class."""
65
66 def __init__(self, api, scraper, name, types, unique_id: str):
67 self._api = api
68 self._scraper = scraper
69 self._name = name
70 self._types = types
71 self._unique_id = unique_id
72 self._attr_unique_id = unique_id
73
74 @property
75 def name(self):
76 """Return entity name."""
77 return self._name
78
79 @property
80 def event(self):
81 """Return next collection event."""
82 collections = self._scraper.get_upcoming(
83 count=1, include_today=True, types=self._types
84 )
85
86 if len(collections) == 0:
87 return None
88 else:
89 return self._convert(collections[0])
90
91 async def async_get_events(self, hass, start_date, end_date):
92 """Return all events within specified time span."""
93 collections = []
94 for a in self._scraper.get_upcoming(include_today=True, types=self._types):
95 if a.date >= start_date.date() and a.date <= end_date.date():
96 collections.append(self._convert(a))
97 return collections
98
99 def _convert(self, collection):
100 """Convert an collection into a Home Assistant calendar event."""
101 return CalendarEvent(
102 summary=collection.type,
103 start=collection.date,
104 end=collection.date + timedelta(days=1),
105 )
106
107
108 def calc_unique_calendar_id(scraper: Scraper, type: str | None = None):
109 return scraper.unique_id + ("_" + type if type is not None else "") + "_calendar"
110
[end of custom_components/waste_collection_schedule/calendar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/custom_components/waste_collection_schedule/calendar.py b/custom_components/waste_collection_schedule/calendar.py
--- a/custom_components/waste_collection_schedule/calendar.py
+++ b/custom_components/waste_collection_schedule/calendar.py
@@ -1,9 +1,11 @@
"""Calendar platform support for Waste Collection Schedule."""
import logging
-from datetime import timedelta
+from datetime import timedelta, timezone, datetime
from homeassistant.components.calendar import CalendarEntity, CalendarEvent
+from homeassistant.core import HomeAssistant
+from homeassistant.util.dt import DEFAULT_TIME_ZONE
from custom_components.waste_collection_schedule.waste_collection_schedule.scraper import (
Scraper,
@@ -88,15 +90,23 @@
else:
return self._convert(collections[0])
- async def async_get_events(self, hass, start_date, end_date):
+ async def async_get_events(
+ self, hass: HomeAssistant, start_date: datetime, end_date: datetime
+ ):
"""Return all events within specified time span."""
- collections = []
- for a in self._scraper.get_upcoming(include_today=True, types=self._types):
- if a.date >= start_date.date() and a.date <= end_date.date():
- collections.append(self._convert(a))
- return collections
+ events = []
- def _convert(self, collection):
+ for collection in self._scraper.get_upcoming(
+ include_today=True, types=self._types
+ ):
+ event = self._convert(collection)
+
+ if start_date <= event.start_datetime_local <= end_date:
+ events.append(event)
+
+ return events
+
+ def _convert(self, collection) -> CalendarEvent:
"""Convert an collection into a Home Assistant calendar event."""
return CalendarEvent(
summary=collection.type,
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/calendar.py b/custom_components/waste_collection_schedule/calendar.py\n--- a/custom_components/waste_collection_schedule/calendar.py\n+++ b/custom_components/waste_collection_schedule/calendar.py\n@@ -1,9 +1,11 @@\n \"\"\"Calendar platform support for Waste Collection Schedule.\"\"\"\n \n import logging\n-from datetime import timedelta\n+from datetime import timedelta, timezone, datetime\n \n from homeassistant.components.calendar import CalendarEntity, CalendarEvent\n+from homeassistant.core import HomeAssistant\n+from homeassistant.util.dt import DEFAULT_TIME_ZONE\n \n from custom_components.waste_collection_schedule.waste_collection_schedule.scraper import (\n Scraper,\n@@ -88,15 +90,23 @@\n else:\n return self._convert(collections[0])\n \n- async def async_get_events(self, hass, start_date, end_date):\n+ async def async_get_events(\n+ self, hass: HomeAssistant, start_date: datetime, end_date: datetime\n+ ):\n \"\"\"Return all events within specified time span.\"\"\"\n- collections = []\n- for a in self._scraper.get_upcoming(include_today=True, types=self._types):\n- if a.date >= start_date.date() and a.date <= end_date.date():\n- collections.append(self._convert(a))\n- return collections\n+ events = []\n \n- def _convert(self, collection):\n+ for collection in self._scraper.get_upcoming(\n+ include_today=True, types=self._types\n+ ):\n+ event = self._convert(collection)\n+\n+ if start_date <= event.start_datetime_local <= end_date:\n+ events.append(event)\n+\n+ return events\n+\n+ def _convert(self, collection) -> CalendarEvent:\n \"\"\"Convert an collection into a Home Assistant calendar event.\"\"\"\n return CalendarEvent(\n summary=collection.type,\n", "issue": "Calendar used as a trigger for automations never fire.\nSo I created an automation that uses the generated calendar to notify me when I have to put the bins out using the new calendar triggers.\r\n\r\nHowever, the automation never runs because the trigger never fires.\r\n\r\nI debugged this a bit and found tha following issue:\r\n\r\nHA asks the integration [here](https://github.com/home-assistant/core/blob/dev/homeassistant/components/calendar/trigger.py#L98) for all applicable events. However, the returned list is not quite correct. The timestamps are datetimes and the integration checks only the date component [here](https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/custom_components/waste_collection_schedule/calendar.py#L53).\r\n\r\nIn my case, my local timezone is Europe/Berlin, which is currently UTC+2. HA gives UTC timestamps to the integration, so they are shifted by two hours \"into the past\" (not really, but you know what I mean). 
This means that the date check is wrong, as it misses the events for the day.\r\n\r\nI changed the following and it worked in my testing but maybe you have a better idea on how to fix that:\r\n\r\n```python\r\n async def async_get_events(self, hass, start_datetime, end_datetime):\r\n \"\"\"Return all events within specified time span.\"\"\"\r\n collections = []\r\n for a in self._scraper.get_upcoming(include_today=True):\r\n event = self._convert(a)\r\n if event.start_datetime_local >= start_datetime and event.end_datetime_local <= end_datetime:\r\n collections.append(event)\r\n return collections\r\n\r\n def _convert(self, collection):\r\n \"\"\"Convert an collection into a Home Assistant calendar event.\"\"\"\r\n return CalendarEvent(\r\n summary=collection.type,\r\n start=collection.date,\r\n end=collection.date,\r\n )\r\n```\r\n\r\nEssentially, I convert to a HA calender event first and then let HA convert the start/end times of the event to local time to compare them against the given start/end times which are still in UTC. But both are now proper datetime objects with timezone information so comparing them works fine.\n", "before_files": [{"content": "\"\"\"Calendar platform support for Waste Collection Schedule.\"\"\"\n\nimport logging\nfrom datetime import timedelta\n\nfrom homeassistant.components.calendar import CalendarEntity, CalendarEvent\n\nfrom custom_components.waste_collection_schedule.waste_collection_schedule.scraper import (\n Scraper,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_platform(hass, config, async_add_entities, discovery_info=None):\n \"\"\"Set up calendar platform.\"\"\"\n # We only want this platform to be set up via discovery.\n if discovery_info is None:\n return\n\n entities = []\n\n api = discovery_info[\"api\"]\n\n for scraper in api.scrapers:\n dedicated_calendar_types = scraper.get_dedicated_calendar_types()\n global_calendar_types = scraper.get_global_calendar_types()\n\n if dedicated_calendar_types is not None:\n for type in dedicated_calendar_types:\n unique_id = calc_unique_calendar_id(scraper, type)\n\n entities.append(\n WasteCollectionCalendar(\n api,\n scraper,\n scraper.get_calendar_title_for_type(type),\n [scraper.get_collection_type(type)],\n unique_id,\n )\n )\n\n if global_calendar_types is not None or dedicated_calendar_types is None:\n unique_id = calc_unique_calendar_id(scraper)\n entities.append(\n WasteCollectionCalendar(\n api,\n scraper,\n scraper.calendar_title,\n [\n scraper.get_collection_type(type)\n for type in global_calendar_types\n ]\n if global_calendar_types is not None\n else None,\n unique_id,\n )\n )\n\n async_add_entities(entities)\n\n\nclass WasteCollectionCalendar(CalendarEntity):\n \"\"\"Calendar entity class.\"\"\"\n\n def __init__(self, api, scraper, name, types, unique_id: str):\n self._api = api\n self._scraper = scraper\n self._name = name\n self._types = types\n self._unique_id = unique_id\n self._attr_unique_id = unique_id\n\n @property\n def name(self):\n \"\"\"Return entity name.\"\"\"\n return self._name\n\n @property\n def event(self):\n \"\"\"Return next collection event.\"\"\"\n collections = self._scraper.get_upcoming(\n count=1, include_today=True, types=self._types\n )\n\n if len(collections) == 0:\n return None\n else:\n return self._convert(collections[0])\n\n async def async_get_events(self, hass, start_date, end_date):\n \"\"\"Return all events within specified time span.\"\"\"\n collections = []\n for a in self._scraper.get_upcoming(include_today=True, 
types=self._types):\n if a.date >= start_date.date() and a.date <= end_date.date():\n collections.append(self._convert(a))\n return collections\n\n def _convert(self, collection):\n \"\"\"Convert an collection into a Home Assistant calendar event.\"\"\"\n return CalendarEvent(\n summary=collection.type,\n start=collection.date,\n end=collection.date + timedelta(days=1),\n )\n\n\ndef calc_unique_calendar_id(scraper: Scraper, type: str | None = None):\n return scraper.unique_id + (\"_\" + type if type is not None else \"\") + \"_calendar\"\n", "path": "custom_components/waste_collection_schedule/calendar.py"}]} | 1,875 | 392 |
gh_patches_debug_36612 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-2633 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spider officedepot is broken
During the global build at 2021-08-18-14-42-26, spider **officedepot** failed with **0 features** and **0 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/officedepot.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/officedepot.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/officedepot.geojson))
</issue>
<code>
[start of locations/spiders/officedepot.py]
1 # -*- coding: utf-8 -*-
2 import scrapy
3
4 from locations.items import GeojsonPointItem
5 from locations.hours import OpeningHours
6
7
8 class OfficedepotSpider(scrapy.Spider):
9 name = 'officedepot'
10 allowed_domains = ['www.officedepot.com']
11 start_urls = ['https://www.officedepot.com/storelocator/states/']
12
13 def parse_store(self, response):
14 o = OpeningHours()
15 for d in response.xpath('//time[@itemprop="openingHours"]/@datetime').extract():
16 day, times = d.split(' ', 1)
17 s, f = times.split('-')
18
19 # They seem to have a bug where they put down 24:00 when they mean noon
20 if s == '24:00': s = '12:00'
21
22 o.add_range(day, s, f)
23
24
25 store_number_results = response.xpath('//dt[@class="lsp_number"]/text()')
26 if store_number_results:
27 ref = store_number_results[-1].extract().strip()
28
29 yield GeojsonPointItem(
30 lat=response.xpath('//meta[@itemprop="latitude"]/@content').extract_first(),
31 lon=response.xpath('//meta[@itemprop="longitude"]/@content').extract_first(),
32 phone=response.xpath('//p[@itemprop="telephone"]/text()').extract_first(),
33 addr_full=response.xpath('//p[@itemprop="streetAddress"]/text()').extract_first(),
34 city=response.xpath('//p[@itemprop="addressLocality"]/text()').extract_first(),
35 state=response.xpath('//p[@itemprop="addressRegion"]/text()').extract_first(),
36 postcode=response.xpath('//p[@itemprop="postalCode"]/text()').extract_first(),
37 website=response.url,
38 ref=ref,
39 opening_hours=o.as_opening_hours(),
40 )
41
42 def parse(self, response):
43 for state in response.xpath('//div[@style="float: left; width: 200px;"]/a/@href').extract():
44 yield scrapy.Request(
45 response.urljoin(state),
46 callback=self.parse,
47 )
48
49 for store in response.xpath('//div[@style="float: left; width: 300px; padding-top: 10px;"]/a/@href').extract():
50 yield scrapy.Request(
51 response.urljoin(store),
52 callback=self.parse_store,
53 )
54
[end of locations/spiders/officedepot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/officedepot.py b/locations/spiders/officedepot.py
--- a/locations/spiders/officedepot.py
+++ b/locations/spiders/officedepot.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+import json
import scrapy
from locations.items import GeojsonPointItem
@@ -7,8 +8,41 @@
class OfficedepotSpider(scrapy.Spider):
name = 'officedepot'
- allowed_domains = ['www.officedepot.com']
- start_urls = ['https://www.officedepot.com/storelocator/states/']
+ allowed_domains = ["where2getit.com"]
+
+ def start_requests(self):
+ url = 'https://locations.where2getit.com/officedepot/rest/getlist?like=0.9145201524205426&lang=en_US'
+
+ headers = {
+ 'Accept-Language': 'en-US,en;q=0.9',
+ 'Origin': 'https://hosted.where2getit.com',
+ 'Accept-Encoding': 'gzip, deflate, br',
+ 'Accept': 'application/json, text/javascript, */*; q=0.01',
+ 'Referer': 'https://hosted.where2getit.com/officedepot/2015/index1.html',
+ 'Connection': 'keep-alive',
+ 'Content-Type': 'application/json',
+ }
+
+ form_data = {
+ "request": {
+ "appkey": "592778B0-A13B-11EB-B3DB-84030D516365",
+ "formdata": {
+ "order": "city",
+ "objectname": "Locator::Store",
+ "softmatch": "1",
+ "where": {
+ }
+ }
+ }
+ }
+
+ yield scrapy.http.FormRequest(
+ url=url,
+ method='POST',
+ body=json.dumps(form_data),
+ headers=headers,
+ callback=self.parse,
+ )
def parse_store(self, response):
o = OpeningHours()
@@ -40,14 +74,20 @@
)
def parse(self, response):
- for state in response.xpath('//div[@style="float: left; width: 200px;"]/a/@href').extract():
- yield scrapy.Request(
- response.urljoin(state),
- callback=self.parse,
- )
-
- for store in response.xpath('//div[@style="float: left; width: 300px; padding-top: 10px;"]/a/@href').extract():
- yield scrapy.Request(
- response.urljoin(store),
- callback=self.parse_store,
- )
+ data = json.loads(response.body_as_unicode())
+
+ for store in data["response"]["collection"]:
+ properties = {
+ 'ref': store["clientkey"],
+ 'name': store.get("name"),
+ 'addr_full': store["address1"],
+ 'city': store["city"],
+ 'state': store["state"],
+ 'postcode': store["postalcode"],
+ 'country': store["country"],
+ 'lat': store["latitude"],
+ 'lon': store["longitude"],
+ 'phone': store["phone"],
+ }
+
+ yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/officedepot.py b/locations/spiders/officedepot.py\n--- a/locations/spiders/officedepot.py\n+++ b/locations/spiders/officedepot.py\n@@ -1,4 +1,5 @@\n # -*- coding: utf-8 -*-\n+import json\n import scrapy\n \n from locations.items import GeojsonPointItem\n@@ -7,8 +8,41 @@\n \n class OfficedepotSpider(scrapy.Spider):\n name = 'officedepot'\n- allowed_domains = ['www.officedepot.com']\n- start_urls = ['https://www.officedepot.com/storelocator/states/']\n+ allowed_domains = [\"where2getit.com\"]\n+\n+ def start_requests(self):\n+ url = 'https://locations.where2getit.com/officedepot/rest/getlist?like=0.9145201524205426&lang=en_US'\n+\n+ headers = {\n+ 'Accept-Language': 'en-US,en;q=0.9',\n+ 'Origin': 'https://hosted.where2getit.com',\n+ 'Accept-Encoding': 'gzip, deflate, br',\n+ 'Accept': 'application/json, text/javascript, */*; q=0.01',\n+ 'Referer': 'https://hosted.where2getit.com/officedepot/2015/index1.html',\n+ 'Connection': 'keep-alive',\n+ 'Content-Type': 'application/json',\n+ }\n+\n+ form_data = {\n+ \"request\": {\n+ \"appkey\": \"592778B0-A13B-11EB-B3DB-84030D516365\",\n+ \"formdata\": {\n+ \"order\": \"city\",\n+ \"objectname\": \"Locator::Store\",\n+ \"softmatch\": \"1\",\n+ \"where\": {\n+ }\n+ }\n+ }\n+ }\n+\n+ yield scrapy.http.FormRequest(\n+ url=url,\n+ method='POST',\n+ body=json.dumps(form_data),\n+ headers=headers,\n+ callback=self.parse,\n+ )\n \n def parse_store(self, response):\n o = OpeningHours()\n@@ -40,14 +74,20 @@\n )\n \n def parse(self, response):\n- for state in response.xpath('//div[@style=\"float: left; width: 200px;\"]/a/@href').extract():\n- yield scrapy.Request(\n- response.urljoin(state),\n- callback=self.parse,\n- )\n-\n- for store in response.xpath('//div[@style=\"float: left; width: 300px; padding-top: 10px;\"]/a/@href').extract():\n- yield scrapy.Request(\n- response.urljoin(store),\n- callback=self.parse_store,\n- )\n+ data = json.loads(response.body_as_unicode())\n+\n+ for store in data[\"response\"][\"collection\"]:\n+ properties = {\n+ 'ref': store[\"clientkey\"],\n+ 'name': store.get(\"name\"),\n+ 'addr_full': store[\"address1\"],\n+ 'city': store[\"city\"],\n+ 'state': store[\"state\"],\n+ 'postcode': store[\"postalcode\"],\n+ 'country': store[\"country\"],\n+ 'lat': store[\"latitude\"],\n+ 'lon': store[\"longitude\"],\n+ 'phone': store[\"phone\"],\n+ }\n+\n+ yield GeojsonPointItem(**properties)\n", "issue": "Spider officedepot is broken\nDuring the global build at 2021-08-18-14-42-26, spider **officedepot** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/logs/officedepot.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/officedepot.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-18-14-42-26/output/officedepot.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass OfficedepotSpider(scrapy.Spider):\n name = 'officedepot'\n allowed_domains = ['www.officedepot.com']\n start_urls = ['https://www.officedepot.com/storelocator/states/']\n\n def parse_store(self, response):\n o = OpeningHours()\n for d in response.xpath('//time[@itemprop=\"openingHours\"]/@datetime').extract():\n day, times = d.split(' ', 1)\n s, f = times.split('-')\n\n # They seem to have a bug where they put down 24:00 when they mean noon\n if s == 
'24:00': s = '12:00'\n\n o.add_range(day, s, f)\n\n\n store_number_results = response.xpath('//dt[@class=\"lsp_number\"]/text()')\n if store_number_results:\n ref = store_number_results[-1].extract().strip()\n\n yield GeojsonPointItem(\n lat=response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first(),\n lon=response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first(),\n phone=response.xpath('//p[@itemprop=\"telephone\"]/text()').extract_first(),\n addr_full=response.xpath('//p[@itemprop=\"streetAddress\"]/text()').extract_first(),\n city=response.xpath('//p[@itemprop=\"addressLocality\"]/text()').extract_first(),\n state=response.xpath('//p[@itemprop=\"addressRegion\"]/text()').extract_first(),\n postcode=response.xpath('//p[@itemprop=\"postalCode\"]/text()').extract_first(),\n website=response.url,\n ref=ref,\n opening_hours=o.as_opening_hours(),\n )\n\n def parse(self, response):\n for state in response.xpath('//div[@style=\"float: left; width: 200px;\"]/a/@href').extract():\n yield scrapy.Request(\n response.urljoin(state),\n callback=self.parse,\n )\n\n for store in response.xpath('//div[@style=\"float: left; width: 300px; padding-top: 10px;\"]/a/@href').extract():\n yield scrapy.Request(\n response.urljoin(store),\n callback=self.parse_store,\n )\n", "path": "locations/spiders/officedepot.py"}]} | 1,337 | 777 |
gh_patches_debug_29567 | rasdani/github-patches | git_diff | GPflow__GPflow-1350 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tf2.2-rc1 gast requirement
Hi,
gpflow 2.0.0-rc1 has gast requirement 0.2.2.
TensorFlow has gast requirement 0.3.3 from 2.2-rc1, which is incompatible with the gpflow requirement.
Best Regards,
Marco
</issue>
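Before the setup.py listing, a sketch of the general idea behind resolving such a conflict: make the gast pin depend on which TensorFlow version will be installed. This is only an illustration using `parse_version` from `pkg_resources` (already imported in the setup.py below); the version boundaries come from the issue text, and the helper name is made up here:

```python
from pkg_resources import parse_version

def gast_pin(tf_version):
    # TF 2.2+ expects the newer gast line; earlier releases expect 0.2.x.
    if parse_version(tf_version) >= parse_version("2.2"):
        return "gast>=0.3.3"
    return "gast>=0.2.2,<0.3"

assert gast_pin("2.1.0") == "gast>=0.2.2,<0.3"
assert gast_pin("2.2.0") == "gast>=0.3.3"
```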
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3
4 # pylint: skip-file
5
6 import os
7 import sys
8 from pathlib import Path
9
10 from pkg_resources import parse_version
11 from setuptools import find_packages, setup
12
13 is_py37 = sys.version_info.major == 3 and sys.version_info.minor == 7
14 on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # copied from the docs
15
16 # Dependencies of GPflow
17 requirements = [
18 'numpy>=1.10.0',
19 'scipy>=0.18.0',
20 'multipledispatch>=0.4.9',
21 'tabulate',
22 'gast==0.2.2',
23 ]
24
25 if not is_py37:
26 requirements.append("dataclasses")
27
28 if not on_rtd:
29 requirements.append("tensorflow-probability>=0.9")
30
31 min_tf_version = '2.1.0'
32 tf_cpu = 'tensorflow'
33 tf_gpu = 'tensorflow-gpu'
34
35 # Only detect TF if not installed or outdated. If not, do not do not list as
36 # requirement to avoid installing over e.g. tensorflow-gpu
37 # To avoid this, rely on importing rather than the package name (like pip).
38
39 try:
40 # If tf not installed, import raises ImportError
41 import tensorflow as tf
42 if parse_version(tf.__version__) < parse_version(min_tf_version):
43 # TF pre-installed, but below the minimum required version
44 raise DeprecationWarning("TensorFlow version below minimum requirement")
45 except (ImportError, DeprecationWarning):
46 # Add TensorFlow to dependencies to trigger installation/update
47 if not on_rtd:
48 # Do not add TF if we are installing GPflow on readthedocs
49 requirements.append(tf_cpu)
50
51 with open(str(Path(".", "VERSION").absolute())) as version_file:
52 version = version_file.read().strip()
53
54 packages = find_packages('.', exclude=["tests"])
55
56 setup(name='gpflow',
57 version=version,
58 author="James Hensman, Alex Matthews",
59 author_email="[email protected]",
60 description="Gaussian process methods in TensorFlow",
61 license="Apache License 2.0",
62 keywords="machine-learning gaussian-processes kernels tensorflow",
63 url="http://github.com/GPflow/GPflow",
64 packages=packages,
65 include_package_data=True,
66 install_requires=requirements,
67 extras_require={'Tensorflow with GPU': [tf_gpu]},
68 python_requires=">=3.6",
69 classifiers=[
70 'License :: OSI Approved :: Apache Software License',
71 'Natural Language :: English',
72 'Operating System :: MacOS :: MacOS X',
73 'Operating System :: Microsoft :: Windows',
74 'Operating System :: POSIX :: Linux',
75 'Programming Language :: Python :: 3.6',
76 'Topic :: Scientific/Engineering :: Artificial Intelligence'
77 ])
78
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,8 +18,7 @@
'numpy>=1.10.0',
'scipy>=0.18.0',
'multipledispatch>=0.4.9',
- 'tabulate',
- 'gast==0.2.2',
+ 'tabulate'
]
if not is_py37:
@@ -32,6 +31,22 @@
tf_cpu = 'tensorflow'
tf_gpu = 'tensorflow-gpu'
+
+# for latest_version() [see https://github.com/GPflow/GPflow/issues/1348]:
+def latest_version(package_name):
+ import json
+ from urllib import request
+ import re
+
+ url = f"https://pypi.python.org/pypi/{package_name}/json"
+ data = json.load(request.urlopen(url))
+ # filter out rc and beta releases and, more generally, any releases that
+ # do not contain exclusively numbers and dots.
+ versions = [parse_version(v) for v in data["releases"].keys() if re.match("^[0-9.]+$", v)]
+ versions.sort()
+ return versions[-1] # return latest version
+
+
# Only detect TF if not installed or outdated. If not, do not do not list as
# requirement to avoid installing over e.g. tensorflow-gpu
# To avoid this, rely on importing rather than the package name (like pip).
@@ -47,6 +62,9 @@
if not on_rtd:
# Do not add TF if we are installing GPflow on readthedocs
requirements.append(tf_cpu)
+ gast_requirement = 'gast>=0.2.2,<0.3' if latest_version('tensorflow') < parse_version('2.2') else 'gast>=0.3.3'
+ requirements.append(gast_requirement)
+
with open(str(Path(".", "VERSION").absolute())) as version_file:
version = version_file.read().strip()
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,8 +18,7 @@\n 'numpy>=1.10.0',\n 'scipy>=0.18.0',\n 'multipledispatch>=0.4.9',\n- 'tabulate',\n- 'gast==0.2.2',\n+ 'tabulate'\n ]\n \n if not is_py37:\n@@ -32,6 +31,22 @@\n tf_cpu = 'tensorflow'\n tf_gpu = 'tensorflow-gpu'\n \n+\n+# for latest_version() [see https://github.com/GPflow/GPflow/issues/1348]:\n+def latest_version(package_name):\n+ import json\n+ from urllib import request\n+ import re\n+\n+ url = f\"https://pypi.python.org/pypi/{package_name}/json\"\n+ data = json.load(request.urlopen(url))\n+ # filter out rc and beta releases and, more generally, any releases that\n+ # do not contain exclusively numbers and dots.\n+ versions = [parse_version(v) for v in data[\"releases\"].keys() if re.match(\"^[0-9.]+$\", v)] \n+ versions.sort()\n+ return versions[-1] # return latest version\n+\n+\n # Only detect TF if not installed or outdated. If not, do not do not list as\n # requirement to avoid installing over e.g. tensorflow-gpu\n # To avoid this, rely on importing rather than the package name (like pip).\n@@ -47,6 +62,9 @@\n if not on_rtd:\n # Do not add TF if we are installing GPflow on readthedocs\n requirements.append(tf_cpu)\n+ gast_requirement = 'gast>=0.2.2,<0.3' if latest_version('tensorflow') < parse_version('2.2') else 'gast>=0.3.3'\n+ requirements.append(gast_requirement)\n+ \n \n with open(str(Path(\".\", \"VERSION\").absolute())) as version_file:\n version = version_file.read().strip()\n", "issue": "tf2.2-rc1 gast requirement\nHi,\r\n\r\ngpflow 2.0.0-rc1 has gast requirement 0.2.2.\r\n\r\nTensorFlow has gast requirement 0.3.3 from 2.2-rc1, which is incompatible with gpflow requirement.\r\n\r\nBest Regards,\r\n\r\nMarco\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# pylint: skip-file\n\nimport os\nimport sys\nfrom pathlib import Path\n\nfrom pkg_resources import parse_version\nfrom setuptools import find_packages, setup\n\nis_py37 = sys.version_info.major == 3 and sys.version_info.minor == 7\non_rtd = os.environ.get('READTHEDOCS', None) == 'True' # copied from the docs\n\n# Dependencies of GPflow\nrequirements = [\n 'numpy>=1.10.0',\n 'scipy>=0.18.0',\n 'multipledispatch>=0.4.9',\n 'tabulate',\n 'gast==0.2.2',\n]\n\nif not is_py37:\n requirements.append(\"dataclasses\")\n\nif not on_rtd:\n requirements.append(\"tensorflow-probability>=0.9\")\n\nmin_tf_version = '2.1.0'\ntf_cpu = 'tensorflow'\ntf_gpu = 'tensorflow-gpu'\n\n# Only detect TF if not installed or outdated. If not, do not do not list as\n# requirement to avoid installing over e.g. 
tensorflow-gpu\n# To avoid this, rely on importing rather than the package name (like pip).\n\ntry:\n # If tf not installed, import raises ImportError\n import tensorflow as tf\n if parse_version(tf.__version__) < parse_version(min_tf_version):\n # TF pre-installed, but below the minimum required version\n raise DeprecationWarning(\"TensorFlow version below minimum requirement\")\nexcept (ImportError, DeprecationWarning):\n # Add TensorFlow to dependencies to trigger installation/update\n if not on_rtd:\n # Do not add TF if we are installing GPflow on readthedocs\n requirements.append(tf_cpu)\n\nwith open(str(Path(\".\", \"VERSION\").absolute())) as version_file:\n version = version_file.read().strip()\n\npackages = find_packages('.', exclude=[\"tests\"])\n\nsetup(name='gpflow',\n version=version,\n author=\"James Hensman, Alex Matthews\",\n author_email=\"[email protected]\",\n description=\"Gaussian process methods in TensorFlow\",\n license=\"Apache License 2.0\",\n keywords=\"machine-learning gaussian-processes kernels tensorflow\",\n url=\"http://github.com/GPflow/GPflow\",\n packages=packages,\n include_package_data=True,\n install_requires=requirements,\n extras_require={'Tensorflow with GPU': [tf_gpu]},\n python_requires=\">=3.6\",\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence'\n ])\n", "path": "setup.py"}]} | 1,358 | 462 |
gh_patches_debug_18286 | rasdani/github-patches | git_diff | spotify__luigi-908 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Exception when a command-line parametter contains a non-ascii character
Hey,
We've just upgraded to Luigi 1.1.2, and now we get an error when launching Tasks that have utf-8 characters on the parameters. (using python 2.7.3)
Here's the stacktrace we get :
```
Traceback (most recent call last):
File "examples/top_artists.py", line 283, in <module>
luigi.run()
File "/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/interface.py", line 434, in run
return interface.run(tasks, worker_scheduler_factory, override_defaults=override_defaults)
File "/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/interface.py", line 165, in run
not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size))):
File "/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/lock.py", line 59, in acquire_for
my_pid, my_cmd, pid_file = get_info(pid_dir)
File "/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/lock.py", line 44, in get_info
pid_file = os.path.join(pid_dir, hashlib.md5(my_cmd.encode('utf8')).hexdigest()) + '.pid'
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 97: ordinal not in range(128)
```
Here I've just added a `plop = luigi.Parameter()` to `AggregateArtists`, and called it with
`python examples/top_artists.py AggregateArtists --local-scheduler --date-interval 2012-06 --plop à`
The error seems to come from the [Python3 refactoring](https://github.com/spotify/luigi/pull/745). Removing `.encode('utf8')` on [this line](https://github.com/spotify/luigi/blob/master/luigi/lock.py#L44) seems to solve the problem, but I guess it was added for a reason ^^.
Any ideas of how we could solve this on our side, other than removing the encode? (I'll try to add a test for it later)
</issue>
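As a rough illustration of the failure mode (the command string below is invented; a real one comes from `getpcmd`): on Python 2 the command arrives as a byte string, so calling `.encode('utf8')` on it forces an implicit ASCII decode first, which is the `UnicodeDecodeError` in the traceback, while on Python 3 the command is text and genuinely needs encoding before hashing. A version guard covers both cases; this sketch uses `sys.version_info`, whereas the accompanying patch reaches for `luigi.six`.

```python
# -*- coding: utf-8 -*-
# Sketch only: reproduce the decode error and hash the command version-safely.
import hashlib
import sys

# What getpcmd() would return on Python 2 for a command with a UTF-8 argument.
my_cmd = b'python examples/top_artists.py AggregateArtists --plop \xc3\xa0'

if sys.version_info[0] >= 3:
    # Python 3: text must be encoded before hashing; bytes pass through.
    cmd_hash = my_cmd.encode('utf8') if isinstance(my_cmd, str) else my_cmd
else:
    # Python 2: the command is already bytes; .encode('utf8') would first
    # decode it with the ascii codec and blow up on 0xc3.
    cmd_hash = my_cmd

print(hashlib.md5(cmd_hash).hexdigest())
```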
<code>
[start of luigi/lock.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2012-2015 Spotify AB
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17 """
18 Locking functionality when launching things from the command line.
19 Uses a pidfile.
20 This prevents multiple identical workflows to be launched simultaneously.
21 """
22 from __future__ import print_function
23
24 import hashlib
25 import os
26
27
28 def getpcmd(pid):
29 """
30 Returns command of process.
31
32 :param pid:
33 """
34 cmd = 'ps -p %s -o command=' % (pid,)
35 p = os.popen(cmd, 'r')
36 return p.readline().strip()
37
38
39 def get_info(pid_dir):
40 # Check the name and pid of this process
41 my_pid = os.getpid()
42 my_cmd = getpcmd(my_pid)
43
44 pid_file = os.path.join(pid_dir, hashlib.md5(my_cmd.encode('utf8')).hexdigest()) + '.pid'
45
46 return my_pid, my_cmd, pid_file
47
48
49 def acquire_for(pid_dir, num_available=1):
50 """
51 Makes sure the process is only run once at the same time with the same name.
52
53 Notice that we since we check the process name, different parameters to the same
54 command can spawn multiple processes at the same time, i.e. running
55 "/usr/bin/my_process" does not prevent anyone from launching
56 "/usr/bin/my_process --foo bar".
57 """
58
59 my_pid, my_cmd, pid_file = get_info(pid_dir)
60
61 # Check if there is a pid file corresponding to this name
62 if not os.path.exists(pid_dir):
63 os.mkdir(pid_dir)
64 os.chmod(pid_dir, 0o777)
65
66 pids = set()
67 pid_cmds = {}
68 if os.path.exists(pid_file):
69 # There is such a file - read the pid and look up its process name
70 pids.update(filter(None, map(str.strip, open(pid_file))))
71 pid_cmds = dict((pid, getpcmd(pid)) for pid in pids)
72 matching_pids = list(filter(lambda pid: pid_cmds[pid] == my_cmd, pids))
73
74 if len(matching_pids) >= num_available:
75 # We are already running under a different pid
76 print('Pid(s)', ', '.join(matching_pids), 'already running')
77 return False
78 else:
79 # The pid belongs to something else, we could
80 pass
81 pid_cmds[str(my_pid)] = my_cmd
82
83 # Write pids
84 pids.add(str(my_pid))
85 with open(pid_file, 'w') as f:
86 f.writelines('%s\n' % (pid, ) for pid in filter(pid_cmds.__getitem__, pids))
87
88 # Make the file writable by all
89 if os.name == 'nt':
90 pass
91 else:
92 s = os.stat(pid_file)
93 if os.getuid() == s.st_uid:
94 os.chmod(pid_file, s.st_mode | 0o777)
95
96 return True
97
[end of luigi/lock.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/luigi/lock.py b/luigi/lock.py
--- a/luigi/lock.py
+++ b/luigi/lock.py
@@ -24,6 +24,8 @@
import hashlib
import os
+from luigi import six
+
def getpcmd(pid):
"""
@@ -32,16 +34,23 @@
:param pid:
"""
cmd = 'ps -p %s -o command=' % (pid,)
- p = os.popen(cmd, 'r')
- return p.readline().strip()
+ with os.popen(cmd, 'r') as p:
+ return p.readline().strip()
-def get_info(pid_dir):
+def get_info(pid_dir, my_pid=None):
# Check the name and pid of this process
- my_pid = os.getpid()
+ if my_pid is None:
+ my_pid = os.getpid()
+
my_cmd = getpcmd(my_pid)
- pid_file = os.path.join(pid_dir, hashlib.md5(my_cmd.encode('utf8')).hexdigest()) + '.pid'
+ if six.PY3:
+ cmd_hash = my_cmd.encode('utf8')
+ else:
+ cmd_hash = my_cmd
+
+ pid_file = os.path.join(pid_dir, hashlib.md5(cmd_hash).hexdigest()) + '.pid'
return my_pid, my_cmd, pid_file
| {"golden_diff": "diff --git a/luigi/lock.py b/luigi/lock.py\n--- a/luigi/lock.py\n+++ b/luigi/lock.py\n@@ -24,6 +24,8 @@\n import hashlib\n import os\n \n+from luigi import six\n+\n \n def getpcmd(pid):\n \"\"\"\n@@ -32,16 +34,23 @@\n :param pid:\n \"\"\"\n cmd = 'ps -p %s -o command=' % (pid,)\n- p = os.popen(cmd, 'r')\n- return p.readline().strip()\n+ with os.popen(cmd, 'r') as p:\n+ return p.readline().strip()\n \n \n-def get_info(pid_dir):\n+def get_info(pid_dir, my_pid=None):\n # Check the name and pid of this process\n- my_pid = os.getpid()\n+ if my_pid is None:\n+ my_pid = os.getpid()\n+\n my_cmd = getpcmd(my_pid)\n \n- pid_file = os.path.join(pid_dir, hashlib.md5(my_cmd.encode('utf8')).hexdigest()) + '.pid'\n+ if six.PY3:\n+ cmd_hash = my_cmd.encode('utf8')\n+ else:\n+ cmd_hash = my_cmd\n+\n+ pid_file = os.path.join(pid_dir, hashlib.md5(cmd_hash).hexdigest()) + '.pid'\n \n return my_pid, my_cmd, pid_file\n", "issue": "Exception when a command-line parametter contains a non-ascii character\nHey,\n\nWe've just upgraded to Luigi 1.1.2, and now we get an error when launching Tasks that have utf-8 characters on the parameters. (using python 2.7.3)\n\nHere's the stacktrace we get : \n\n```\nTraceback (most recent call last):\n File \"examples/top_artists.py\", line 283, in <module>\n luigi.run()\n File \"/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/interface.py\", line 434, in run\n return interface.run(tasks, worker_scheduler_factory, override_defaults=override_defaults)\n File \"/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/interface.py\", line 165, in run\n not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size))):\n File \"/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/lock.py\", line 59, in acquire_for\n my_pid, my_cmd, pid_file = get_info(pid_dir)\n File \"/home/jonas/.virtualenvs/website/local/lib/python2.7/site-packages/luigi/lock.py\", line 44, in get_info\n pid_file = os.path.join(pid_dir, hashlib.md5(my_cmd.encode('utf8')).hexdigest()) + '.pid'\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 97: ordinal not in range(128)\n```\n\nHere I've just added a `plop = luigi.Parameter()` to `AggregateArtists`, and called it with \n`python examples/top_artists.py AggregateArtists --local-scheduler --date-interval 2012-06 --plop \u00e0`\n\nThe error seems to come from the [Python3 refactoring](https://github.com/spotify/luigi/pull/745). Removing `.encode('utf8')` on [this line](https://github.com/spotify/luigi/blob/master/luigi/lock.py#L44) seems to solve the problem, but I guess it was added for a reason ^^.\n\nAny ideas of how we could solve this on our side, other than removing the encode? 
(I'll try to add a test for it later)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nLocking functionality when launching things from the command line.\nUses a pidfile.\nThis prevents multiple identical workflows to be launched simultaneously.\n\"\"\"\nfrom __future__ import print_function\n\nimport hashlib\nimport os\n\n\ndef getpcmd(pid):\n \"\"\"\n Returns command of process.\n\n :param pid:\n \"\"\"\n cmd = 'ps -p %s -o command=' % (pid,)\n p = os.popen(cmd, 'r')\n return p.readline().strip()\n\n\ndef get_info(pid_dir):\n # Check the name and pid of this process\n my_pid = os.getpid()\n my_cmd = getpcmd(my_pid)\n\n pid_file = os.path.join(pid_dir, hashlib.md5(my_cmd.encode('utf8')).hexdigest()) + '.pid'\n\n return my_pid, my_cmd, pid_file\n\n\ndef acquire_for(pid_dir, num_available=1):\n \"\"\"\n Makes sure the process is only run once at the same time with the same name.\n\n Notice that we since we check the process name, different parameters to the same\n command can spawn multiple processes at the same time, i.e. running\n \"/usr/bin/my_process\" does not prevent anyone from launching\n \"/usr/bin/my_process --foo bar\".\n \"\"\"\n\n my_pid, my_cmd, pid_file = get_info(pid_dir)\n\n # Check if there is a pid file corresponding to this name\n if not os.path.exists(pid_dir):\n os.mkdir(pid_dir)\n os.chmod(pid_dir, 0o777)\n\n pids = set()\n pid_cmds = {}\n if os.path.exists(pid_file):\n # There is such a file - read the pid and look up its process name\n pids.update(filter(None, map(str.strip, open(pid_file))))\n pid_cmds = dict((pid, getpcmd(pid)) for pid in pids)\n matching_pids = list(filter(lambda pid: pid_cmds[pid] == my_cmd, pids))\n\n if len(matching_pids) >= num_available:\n # We are already running under a different pid\n print('Pid(s)', ', '.join(matching_pids), 'already running')\n return False\n else:\n # The pid belongs to something else, we could\n pass\n pid_cmds[str(my_pid)] = my_cmd\n\n # Write pids\n pids.add(str(my_pid))\n with open(pid_file, 'w') as f:\n f.writelines('%s\\n' % (pid, ) for pid in filter(pid_cmds.__getitem__, pids))\n\n # Make the file writable by all\n if os.name == 'nt':\n pass\n else:\n s = os.stat(pid_file)\n if os.getuid() == s.st_uid:\n os.chmod(pid_file, s.st_mode | 0o777)\n\n return True\n", "path": "luigi/lock.py"}]} | 2,000 | 313 |
gh_patches_debug_32795 | rasdani/github-patches | git_diff | litestar-org__litestar-1999 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Lots of `pydantic` warnings: `.dict()` and `.json()` are deprecated
### Description
You can find lots of `DeprecationWarning` instances here: https://github.com/litestar-org/litestar/actions/runs/5578844701/jobs/10193581342
I propose to add a compat layer to call `.model_dump` and `.model_dump_json` on v2 and `.dict` and `.json` on v1, since they are both supported right now.
### URL to code causing the issue
_No response_
### MCVE
_No response_
### Steps to reproduce
_No response_
### Screenshots
_No response_
### Logs
_No response_
### Litestar Version
`main`
### Platform
- [ ] Linux
- [X] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
## Funding
* If you would like to see an issue prioritized, make a pledge towards it!
* We receive the pledge once the issue is completed & verified
<a href="https://polar.sh/litestar-org/litestar/issues/1996">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/1996/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/1996/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
</issue>
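A minimal sketch of the proposed compat layer (helper names here are illustrative): dispatch on whether the pydantic v2 methods exist and fall back to the v1 ones otherwise, so call sites stay identical on both majors and the v2 `DeprecationWarning` goes away.

```python
from __future__ import annotations

from typing import Any

import pydantic  # assumed installed; either major version works


def compat_model_dump(model: pydantic.BaseModel, *, by_alias: bool = False) -> dict[str, Any]:
    # pydantic v2 exposes model_dump(); v1 only has dict(), which v2 deprecates.
    if hasattr(model, "model_dump"):
        return model.model_dump(mode="json", by_alias=by_alias)
    return model.dict(by_alias=by_alias)


def compat_model_dump_json(model: pydantic.BaseModel) -> str:
    if hasattr(model, "model_dump_json"):
        return model.model_dump_json()
    return model.json()
```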
<code>
[start of litestar/_openapi/schema_generation/examples.py]
1 from __future__ import annotations
2
3 from dataclasses import replace
4 from enum import Enum
5 from typing import TYPE_CHECKING, Any
6
7 from _decimal import Decimal
8 from polyfactory.exceptions import ParameterException
9 from polyfactory.field_meta import FieldMeta, Null
10 from polyfactory.utils.helpers import unwrap_annotation
11
12 from litestar.openapi.spec import Example
13 from litestar.types import Empty
14 from litestar.utils import is_pydantic_model_instance
15
16 try:
17 from polyfactory.factories.pydantic_factory import ModelFactory as Factory
18 except ImportError:
19 from polyfactory.factories import DataclassFactory as Factory # type: ignore[assignment]
20
21
22 if TYPE_CHECKING:
23 from litestar.typing import FieldDefinition
24
25
26 Factory.seed_random(10)
27
28
29 def _normalize_example_value(value: Any) -> Any:
30 """Normalize the example value to make it look a bit prettier."""
31 value = unwrap_annotation(annotation=value, random=Factory.__random__)
32 if isinstance(value, (Decimal, float)):
33 value = round(float(value), 2)
34 if isinstance(value, Enum):
35 value = value.value
36 if is_pydantic_model_instance(value):
37 value = value.dict()
38 if isinstance(value, (list, set)):
39 value = [_normalize_example_value(v) for v in value]
40 if isinstance(value, dict):
41 for k, v in value.items():
42 value[k] = _normalize_example_value(v)
43 return value
44
45
46 def _create_field_meta(field: FieldDefinition) -> FieldMeta:
47 return FieldMeta.from_type(
48 annotation=field.annotation,
49 default=field.default if field.default is not Empty else Null,
50 name=field.name,
51 random=Factory.__random__,
52 )
53
54
55 def create_examples_for_field(field: FieldDefinition) -> list[Example]:
56 """Create an OpenAPI Example instance.
57
58 Args:
59 field: A signature field.
60
61 Returns:
62 A list including a single example.
63 """
64 try:
65 field_meta = _create_field_meta(replace(field, annotation=_normalize_example_value(field.annotation)))
66 value = Factory.get_field_value(field_meta)
67 return [Example(description=f"Example {field.name} value", value=value)]
68 except ParameterException:
69 return []
70
[end of litestar/_openapi/schema_generation/examples.py]
[start of litestar/contrib/pydantic/__init__.py]
1 from .pydantic_dto_factory import PydanticDTO
2 from .pydantic_init_plugin import PydanticInitPlugin
3 from .pydantic_schema_plugin import PydanticSchemaPlugin
4
5 __all__ = ("PydanticDTO", "PydanticInitPlugin", "PydanticSchemaPlugin")
6
[end of litestar/contrib/pydantic/__init__.py]
[start of litestar/contrib/pydantic/pydantic_init_plugin.py]
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING, Any, Callable, TypeVar, cast
4 from uuid import UUID
5
6 from msgspec import ValidationError
7
8 from litestar.exceptions import MissingDependencyException
9 from litestar.plugins import InitPluginProtocol
10 from litestar.serialization._msgspec_utils import ExtendedMsgSpecValidationError
11 from litestar.utils import is_class_and_subclass, is_pydantic_model_class
12
13 if TYPE_CHECKING:
14 from litestar.config.app import AppConfig
15
16 try:
17 import pydantic
18 except ImportError as e:
19 raise MissingDependencyException("pydantic") from e
20
21 T = TypeVar("T")
22
23
24 def _dec_pydantic(model_type: type[pydantic.BaseModel], value: Any) -> pydantic.BaseModel:
25 try:
26 return (
27 model_type.model_validate(value, strict=False)
28 if hasattr(model_type, "model_validate")
29 else model_type.parse_obj(value)
30 )
31 except pydantic.ValidationError as e:
32 raise ExtendedMsgSpecValidationError(errors=cast("list[dict[str, Any]]", e.errors())) from e
33
34
35 def _dec_pydantic_uuid(
36 uuid_type: type[pydantic.UUID1] | type[pydantic.UUID3] | type[pydantic.UUID4] | type[pydantic.UUID5],
37 value: Any,
38 ) -> type[pydantic.UUID1] | type[pydantic.UUID3] | type[pydantic.UUID4] | type[pydantic.UUID5]: # pragma: no cover
39 if isinstance(value, str):
40 value = uuid_type(value)
41
42 elif isinstance(value, (bytes, bytearray)):
43 try:
44 value = uuid_type(value.decode())
45 except ValueError:
46 # 16 bytes in big-endian order as the bytes argument fail
47 # the above check
48 value = uuid_type(bytes=value)
49 elif isinstance(value, UUID):
50 value = uuid_type(str(value))
51
52 if not isinstance(value, uuid_type):
53 raise ValidationError(f"Invalid UUID: {value!r}")
54
55 if value._required_version != value.version: # pyright: ignore
56 raise ValidationError(f"Invalid UUID version: {value!r}")
57
58 return cast("type[pydantic.UUID1] | type[pydantic.UUID3] | type[pydantic.UUID4] | type[pydantic.UUID5]", value)
59
60
61 def _is_pydantic_uuid(value: Any) -> bool: # pragma: no cover
62 return is_class_and_subclass(value, (pydantic.UUID1, pydantic.UUID3, pydantic.UUID4, pydantic.UUID5))
63
64
65 _base_encoders: dict[Any, Callable[[Any], Any]] = {
66 pydantic.EmailStr: str,
67 pydantic.NameEmail: str,
68 pydantic.ByteSize: lambda val: val.real,
69 }
70
71
72 class PydanticInitPlugin(InitPluginProtocol):
73 @classmethod
74 def encoders(cls) -> dict[Any, Callable[[Any], Any]]:
75 if pydantic.VERSION.startswith("1"): # pragma: no cover
76 return {**_base_encoders, **cls._create_pydantic_v1_encoders()}
77 return {**_base_encoders, **cls._create_pydantic_v2_encoders()}
78
79 @classmethod
80 def decoders(cls) -> list[tuple[Callable[[Any], bool], Callable[[Any, Any], Any]]]:
81 decoders: list[tuple[Callable[[Any], bool], Callable[[Any, Any], Any]]] = [
82 (is_pydantic_model_class, _dec_pydantic)
83 ]
84
85 if pydantic.VERSION.startswith("1"): # pragma: no cover
86 decoders.append((_is_pydantic_uuid, _dec_pydantic_uuid))
87
88 return decoders
89
90 @staticmethod
91 def _create_pydantic_v1_encoders() -> dict[Any, Callable[[Any], Any]]: # pragma: no cover
92 return {
93 pydantic.BaseModel: lambda model: {
94 k: v.decode() if isinstance(v, bytes) else v for k, v in model.dict().items()
95 },
96 pydantic.SecretField: str,
97 pydantic.StrictBool: int,
98 pydantic.color.Color: str, # pyright: ignore
99 pydantic.ConstrainedBytes: lambda val: val.decode("utf-8"),
100 pydantic.ConstrainedDate: lambda val: val.isoformat(),
101 }
102
103 @staticmethod
104 def _create_pydantic_v2_encoders() -> dict[Any, Callable[[Any], Any]]:
105 try:
106 from pydantic_extra_types import color
107 except ImportError:
108 color = None # type: ignore[assignment]
109 encoders: dict[Any, Callable[[Any], Any]] = {
110 pydantic.BaseModel: lambda model: model.model_dump(mode="json"),
111 pydantic.types.SecretStr: lambda val: "**********" if val else "",
112 pydantic.types.SecretBytes: lambda val: "**********" if val else "",
113 }
114 if color:
115 encoders[color.Color] = str
116 return encoders
117
118 def on_app_init(self, app_config: AppConfig) -> AppConfig:
119 app_config.type_encoders = {**self.encoders(), **(app_config.type_encoders or {})}
120 app_config.type_decoders = [*self.decoders(), *(app_config.type_decoders or [])]
121 return app_config
122
[end of litestar/contrib/pydantic/pydantic_init_plugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/_openapi/schema_generation/examples.py b/litestar/_openapi/schema_generation/examples.py
--- a/litestar/_openapi/schema_generation/examples.py
+++ b/litestar/_openapi/schema_generation/examples.py
@@ -34,7 +34,9 @@
if isinstance(value, Enum):
value = value.value
if is_pydantic_model_instance(value):
- value = value.dict()
+ from litestar.contrib.pydantic import _model_dump
+
+ value = _model_dump(value)
if isinstance(value, (list, set)):
value = [_normalize_example_value(v) for v in value]
if isinstance(value, dict):
diff --git a/litestar/contrib/pydantic/__init__.py b/litestar/contrib/pydantic/__init__.py
--- a/litestar/contrib/pydantic/__init__.py
+++ b/litestar/contrib/pydantic/__init__.py
@@ -1,5 +1,24 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Any
+
from .pydantic_dto_factory import PydanticDTO
from .pydantic_init_plugin import PydanticInitPlugin
from .pydantic_schema_plugin import PydanticSchemaPlugin
+if TYPE_CHECKING:
+ import pydantic
+
__all__ = ("PydanticDTO", "PydanticInitPlugin", "PydanticSchemaPlugin")
+
+
+def _model_dump(model: pydantic.BaseModel, *, by_alias: bool = False) -> dict[str, Any]:
+ return (
+ model.model_dump(mode="json", by_alias=by_alias)
+ if hasattr(model, "model_dump")
+ else model.dict(by_alias=by_alias)
+ )
+
+
+def _model_dump_json(model: pydantic.BaseModel) -> str:
+ return model.model_dump_json() if hasattr(model, "model_dump_json") else model.json()
diff --git a/litestar/contrib/pydantic/pydantic_init_plugin.py b/litestar/contrib/pydantic/pydantic_init_plugin.py
--- a/litestar/contrib/pydantic/pydantic_init_plugin.py
+++ b/litestar/contrib/pydantic/pydantic_init_plugin.py
@@ -90,9 +90,7 @@
@staticmethod
def _create_pydantic_v1_encoders() -> dict[Any, Callable[[Any], Any]]: # pragma: no cover
return {
- pydantic.BaseModel: lambda model: {
- k: v.decode() if isinstance(v, bytes) else v for k, v in model.dict().items()
- },
+ pydantic.BaseModel: lambda model: model.dict(),
pydantic.SecretField: str,
pydantic.StrictBool: int,
pydantic.color.Color: str, # pyright: ignore
| {"golden_diff": "diff --git a/litestar/_openapi/schema_generation/examples.py b/litestar/_openapi/schema_generation/examples.py\n--- a/litestar/_openapi/schema_generation/examples.py\n+++ b/litestar/_openapi/schema_generation/examples.py\n@@ -34,7 +34,9 @@\n if isinstance(value, Enum):\n value = value.value\n if is_pydantic_model_instance(value):\n- value = value.dict()\n+ from litestar.contrib.pydantic import _model_dump\n+\n+ value = _model_dump(value)\n if isinstance(value, (list, set)):\n value = [_normalize_example_value(v) for v in value]\n if isinstance(value, dict):\ndiff --git a/litestar/contrib/pydantic/__init__.py b/litestar/contrib/pydantic/__init__.py\n--- a/litestar/contrib/pydantic/__init__.py\n+++ b/litestar/contrib/pydantic/__init__.py\n@@ -1,5 +1,24 @@\n+from __future__ import annotations\n+\n+from typing import TYPE_CHECKING, Any\n+\n from .pydantic_dto_factory import PydanticDTO\n from .pydantic_init_plugin import PydanticInitPlugin\n from .pydantic_schema_plugin import PydanticSchemaPlugin\n \n+if TYPE_CHECKING:\n+ import pydantic\n+\n __all__ = (\"PydanticDTO\", \"PydanticInitPlugin\", \"PydanticSchemaPlugin\")\n+\n+\n+def _model_dump(model: pydantic.BaseModel, *, by_alias: bool = False) -> dict[str, Any]:\n+ return (\n+ model.model_dump(mode=\"json\", by_alias=by_alias)\n+ if hasattr(model, \"model_dump\")\n+ else model.dict(by_alias=by_alias)\n+ )\n+\n+\n+def _model_dump_json(model: pydantic.BaseModel) -> str:\n+ return model.model_dump_json() if hasattr(model, \"model_dump_json\") else model.json()\ndiff --git a/litestar/contrib/pydantic/pydantic_init_plugin.py b/litestar/contrib/pydantic/pydantic_init_plugin.py\n--- a/litestar/contrib/pydantic/pydantic_init_plugin.py\n+++ b/litestar/contrib/pydantic/pydantic_init_plugin.py\n@@ -90,9 +90,7 @@\n @staticmethod\n def _create_pydantic_v1_encoders() -> dict[Any, Callable[[Any], Any]]: # pragma: no cover\n return {\n- pydantic.BaseModel: lambda model: {\n- k: v.decode() if isinstance(v, bytes) else v for k, v in model.dict().items()\n- },\n+ pydantic.BaseModel: lambda model: model.dict(),\n pydantic.SecretField: str,\n pydantic.StrictBool: int,\n pydantic.color.Color: str, # pyright: ignore\n", "issue": "Lots of `pydantic` warnings: `.dict()` and `.json()` are deprecated\n### Description\n\nYou can find lots of `DeprecationWarning` instances here: https://github.com/litestar-org/litestar/actions/runs/5578844701/jobs/10193581342\r\n\r\nI propose to add a compat layer to call `.model_dump` and `.model_dump_json` on v2 and `.dict` and `.json` on v1, since they are both supported right now.\n\n### URL to code causing the issue\n\n_No response_\n\n### MCVE\n\n_No response_\n\n### Steps to reproduce\n\n_No response_\n\n### Screenshots\n\n_No response_\n\n### Logs\n\n_No response_\n\n### Litestar Version\n\n`main`\n\n### Platform\n\n- [ ] Linux\n- [X] Mac\n- [ ] Windows\n- [ ] Other (Please specify in the description above)\n\n<!-- POLAR PLEDGE BADGE START -->\n## Funding\n* If you would like to see an issue prioritized, make a pledge towards it!\n* We receive the pledge once the issue is completed & verified\n\n<a href=\"https://polar.sh/litestar-org/litestar/issues/1996\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/1996/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/1996/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": 
"from __future__ import annotations\n\nfrom dataclasses import replace\nfrom enum import Enum\nfrom typing import TYPE_CHECKING, Any\n\nfrom _decimal import Decimal\nfrom polyfactory.exceptions import ParameterException\nfrom polyfactory.field_meta import FieldMeta, Null\nfrom polyfactory.utils.helpers import unwrap_annotation\n\nfrom litestar.openapi.spec import Example\nfrom litestar.types import Empty\nfrom litestar.utils import is_pydantic_model_instance\n\ntry:\n from polyfactory.factories.pydantic_factory import ModelFactory as Factory\nexcept ImportError:\n from polyfactory.factories import DataclassFactory as Factory # type: ignore[assignment]\n\n\nif TYPE_CHECKING:\n from litestar.typing import FieldDefinition\n\n\nFactory.seed_random(10)\n\n\ndef _normalize_example_value(value: Any) -> Any:\n \"\"\"Normalize the example value to make it look a bit prettier.\"\"\"\n value = unwrap_annotation(annotation=value, random=Factory.__random__)\n if isinstance(value, (Decimal, float)):\n value = round(float(value), 2)\n if isinstance(value, Enum):\n value = value.value\n if is_pydantic_model_instance(value):\n value = value.dict()\n if isinstance(value, (list, set)):\n value = [_normalize_example_value(v) for v in value]\n if isinstance(value, dict):\n for k, v in value.items():\n value[k] = _normalize_example_value(v)\n return value\n\n\ndef _create_field_meta(field: FieldDefinition) -> FieldMeta:\n return FieldMeta.from_type(\n annotation=field.annotation,\n default=field.default if field.default is not Empty else Null,\n name=field.name,\n random=Factory.__random__,\n )\n\n\ndef create_examples_for_field(field: FieldDefinition) -> list[Example]:\n \"\"\"Create an OpenAPI Example instance.\n\n Args:\n field: A signature field.\n\n Returns:\n A list including a single example.\n \"\"\"\n try:\n field_meta = _create_field_meta(replace(field, annotation=_normalize_example_value(field.annotation)))\n value = Factory.get_field_value(field_meta)\n return [Example(description=f\"Example {field.name} value\", value=value)]\n except ParameterException:\n return []\n", "path": "litestar/_openapi/schema_generation/examples.py"}, {"content": "from .pydantic_dto_factory import PydanticDTO\nfrom .pydantic_init_plugin import PydanticInitPlugin\nfrom .pydantic_schema_plugin import PydanticSchemaPlugin\n\n__all__ = (\"PydanticDTO\", \"PydanticInitPlugin\", \"PydanticSchemaPlugin\")\n", "path": "litestar/contrib/pydantic/__init__.py"}, {"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any, Callable, TypeVar, cast\nfrom uuid import UUID\n\nfrom msgspec import ValidationError\n\nfrom litestar.exceptions import MissingDependencyException\nfrom litestar.plugins import InitPluginProtocol\nfrom litestar.serialization._msgspec_utils import ExtendedMsgSpecValidationError\nfrom litestar.utils import is_class_and_subclass, is_pydantic_model_class\n\nif TYPE_CHECKING:\n from litestar.config.app import AppConfig\n\ntry:\n import pydantic\nexcept ImportError as e:\n raise MissingDependencyException(\"pydantic\") from e\n\nT = TypeVar(\"T\")\n\n\ndef _dec_pydantic(model_type: type[pydantic.BaseModel], value: Any) -> pydantic.BaseModel:\n try:\n return (\n model_type.model_validate(value, strict=False)\n if hasattr(model_type, \"model_validate\")\n else model_type.parse_obj(value)\n )\n except pydantic.ValidationError as e:\n raise ExtendedMsgSpecValidationError(errors=cast(\"list[dict[str, Any]]\", e.errors())) from e\n\n\ndef _dec_pydantic_uuid(\n uuid_type: type[pydantic.UUID1] | 
type[pydantic.UUID3] | type[pydantic.UUID4] | type[pydantic.UUID5],\n value: Any,\n) -> type[pydantic.UUID1] | type[pydantic.UUID3] | type[pydantic.UUID4] | type[pydantic.UUID5]: # pragma: no cover\n if isinstance(value, str):\n value = uuid_type(value)\n\n elif isinstance(value, (bytes, bytearray)):\n try:\n value = uuid_type(value.decode())\n except ValueError:\n # 16 bytes in big-endian order as the bytes argument fail\n # the above check\n value = uuid_type(bytes=value)\n elif isinstance(value, UUID):\n value = uuid_type(str(value))\n\n if not isinstance(value, uuid_type):\n raise ValidationError(f\"Invalid UUID: {value!r}\")\n\n if value._required_version != value.version: # pyright: ignore\n raise ValidationError(f\"Invalid UUID version: {value!r}\")\n\n return cast(\"type[pydantic.UUID1] | type[pydantic.UUID3] | type[pydantic.UUID4] | type[pydantic.UUID5]\", value)\n\n\ndef _is_pydantic_uuid(value: Any) -> bool: # pragma: no cover\n return is_class_and_subclass(value, (pydantic.UUID1, pydantic.UUID3, pydantic.UUID4, pydantic.UUID5))\n\n\n_base_encoders: dict[Any, Callable[[Any], Any]] = {\n pydantic.EmailStr: str,\n pydantic.NameEmail: str,\n pydantic.ByteSize: lambda val: val.real,\n}\n\n\nclass PydanticInitPlugin(InitPluginProtocol):\n @classmethod\n def encoders(cls) -> dict[Any, Callable[[Any], Any]]:\n if pydantic.VERSION.startswith(\"1\"): # pragma: no cover\n return {**_base_encoders, **cls._create_pydantic_v1_encoders()}\n return {**_base_encoders, **cls._create_pydantic_v2_encoders()}\n\n @classmethod\n def decoders(cls) -> list[tuple[Callable[[Any], bool], Callable[[Any, Any], Any]]]:\n decoders: list[tuple[Callable[[Any], bool], Callable[[Any, Any], Any]]] = [\n (is_pydantic_model_class, _dec_pydantic)\n ]\n\n if pydantic.VERSION.startswith(\"1\"): # pragma: no cover\n decoders.append((_is_pydantic_uuid, _dec_pydantic_uuid))\n\n return decoders\n\n @staticmethod\n def _create_pydantic_v1_encoders() -> dict[Any, Callable[[Any], Any]]: # pragma: no cover\n return {\n pydantic.BaseModel: lambda model: {\n k: v.decode() if isinstance(v, bytes) else v for k, v in model.dict().items()\n },\n pydantic.SecretField: str,\n pydantic.StrictBool: int,\n pydantic.color.Color: str, # pyright: ignore\n pydantic.ConstrainedBytes: lambda val: val.decode(\"utf-8\"),\n pydantic.ConstrainedDate: lambda val: val.isoformat(),\n }\n\n @staticmethod\n def _create_pydantic_v2_encoders() -> dict[Any, Callable[[Any], Any]]:\n try:\n from pydantic_extra_types import color\n except ImportError:\n color = None # type: ignore[assignment]\n encoders: dict[Any, Callable[[Any], Any]] = {\n pydantic.BaseModel: lambda model: model.model_dump(mode=\"json\"),\n pydantic.types.SecretStr: lambda val: \"**********\" if val else \"\",\n pydantic.types.SecretBytes: lambda val: \"**********\" if val else \"\",\n }\n if color:\n encoders[color.Color] = str\n return encoders\n\n def on_app_init(self, app_config: AppConfig) -> AppConfig:\n app_config.type_encoders = {**self.encoders(), **(app_config.type_encoders or {})}\n app_config.type_decoders = [*self.decoders(), *(app_config.type_decoders or [])]\n return app_config\n", "path": "litestar/contrib/pydantic/pydantic_init_plugin.py"}]} | 3,090 | 637 |
gh_patches_debug_22058 | rasdani/github-patches | git_diff | pex-tool__pex-258 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update PyPI page
It would be nice if the `README.rst` were included in the `setup.py` `long_description` in addition to the `CHANGES.rst` so that users browsing PyPI could read the README without having to travel to GitHub.
Would also be nice if the trove classifiers in `setup.py` reflected which versions of Python were officially supported (e.g. `'Programming Language :: Python :: 3.5'`).
</issue>
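A short sketch of both requests (assuming `README.rst` sits next to `setup.py`, as the issue implies; the classifier list mirrors the accompanying patch and should track whatever interpreters pex actually supports):

```python
import os

here = os.path.dirname(__file__)

# Concatenate the README and the changelog so the PyPI page opens with the
# project overview and the release notes follow underneath.
long_description = ''
for name in ('README.rst', 'CHANGES.rst'):
    with open(os.path.join(here, name)) as fp:
        long_description += fp.read() + '\n'

# Spell out the supported interpreters via trove classifiers.
classifiers = [
    'Intended Audience :: Developers',
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
]
```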
<code>
[start of setup.py]
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 import os
5
6 from setuptools import setup
7
8
9 with open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')) as fp:
10 LONG_DESCRIPTION = fp.read()
11
12
13 # This seems to be a fairly standard version file pattern.
14 #
15 # Populates the following variables:
16 # __version__
17 # __setuptools_requirement
18 # __wheel_requirement
19 __version__ = ''
20 version_py_file = os.path.join(os.path.dirname(__file__), 'pex', 'version.py')
21 with open(version_py_file) as version_py:
22 exec(compile(version_py.read(), version_py_file, 'exec'))
23
24
25 setup(
26 name = 'pex',
27 version = __version__,
28 description = "The PEX packaging toolchain.",
29 long_description = LONG_DESCRIPTION,
30 url = 'https://github.com/pantsbuild/pex',
31 license = 'Apache License, Version 2.0',
32 zip_safe = True,
33 classifiers = [
34 'Intended Audience :: Developers',
35 'License :: OSI Approved :: Apache Software License',
36 'Operating System :: OS Independent',
37 'Programming Language :: Python',
38 ],
39 packages = [
40 'pex',
41 'pex.bin',
42 'pex.commands',
43 ],
44 install_requires = [
45 SETUPTOOLS_REQUIREMENT,
46 ],
47 tests_require = [
48 'mock',
49 'twitter.common.contextutil>=0.3.1,<0.4.0',
50 'twitter.common.lang>=0.3.1,<0.4.0',
51 'twitter.common.testing>=0.3.1,<0.4.0',
52 'twitter.common.dirutil>=0.3.1,<0.4.0',
53 'pytest',
54 ],
55 entry_points = {
56 'distutils.commands': [
57 'bdist_pex = pex.commands.bdist_pex:bdist_pex',
58 ],
59 'console_scripts': [
60 'pex = pex.bin.pex:main',
61 ],
62 },
63 )
64
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,9 +5,11 @@
from setuptools import setup
+with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as fp:
+ LONG_DESCRIPTION = fp.read() + '\n'
with open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')) as fp:
- LONG_DESCRIPTION = fp.read()
+ LONG_DESCRIPTION += fp.read()
# This seems to be a fairly standard version file pattern.
@@ -33,8 +35,17 @@
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
- 'Operating System :: OS Independent',
+ 'Operating System :: Unix',
+ 'Operating System :: POSIX :: Linux',
+ 'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
],
packages = [
'pex',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,9 +5,11 @@\n \n from setuptools import setup\n \n+with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as fp:\n+ LONG_DESCRIPTION = fp.read() + '\\n'\n \n with open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')) as fp:\n- LONG_DESCRIPTION = fp.read()\n+ LONG_DESCRIPTION += fp.read()\n \n \n # This seems to be a fairly standard version file pattern.\n@@ -33,8 +35,17 @@\n classifiers = [\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n- 'Operating System :: OS Independent',\n+ 'Operating System :: Unix',\n+ 'Operating System :: POSIX :: Linux',\n+ 'Operating System :: MacOS :: MacOS X',\n 'Programming Language :: Python',\n+ 'Programming Language :: Python :: 2',\n+ 'Programming Language :: Python :: 2.6',\n+ 'Programming Language :: Python :: 2.7',\n+ 'Programming Language :: Python :: 3',\n+ 'Programming Language :: Python :: 3.3',\n+ 'Programming Language :: Python :: 3.4',\n+ 'Programming Language :: Python :: 3.5',\n ],\n packages = [\n 'pex',\n", "issue": "Update PyPI page\nIt would be nice if the `README.rst` were included in the `setup.py` `long_description` in addition to the `CHANGES.rst` so that users browsing PyPI could read the README without having to travel to GitHub.\n\nWould also be nice if the trove classifiers in `setup.py` reflected which versions of Python were officially supported (e.g. `'Programming Language :: Python :: 3.5'`).\n\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport os\n\nfrom setuptools import setup\n\n\nwith open(os.path.join(os.path.dirname(__file__), 'CHANGES.rst')) as fp:\n LONG_DESCRIPTION = fp.read()\n\n\n# This seems to be a fairly standard version file pattern.\n#\n# Populates the following variables:\n# __version__\n# __setuptools_requirement\n# __wheel_requirement\n__version__ = ''\nversion_py_file = os.path.join(os.path.dirname(__file__), 'pex', 'version.py')\nwith open(version_py_file) as version_py:\n exec(compile(version_py.read(), version_py_file, 'exec'))\n\n\nsetup(\n name = 'pex',\n version = __version__,\n description = \"The PEX packaging toolchain.\",\n long_description = LONG_DESCRIPTION,\n url = 'https://github.com/pantsbuild/pex',\n license = 'Apache License, Version 2.0',\n zip_safe = True,\n classifiers = [\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n ],\n packages = [\n 'pex',\n 'pex.bin',\n 'pex.commands',\n ],\n install_requires = [\n SETUPTOOLS_REQUIREMENT,\n ],\n tests_require = [\n 'mock',\n 'twitter.common.contextutil>=0.3.1,<0.4.0',\n 'twitter.common.lang>=0.3.1,<0.4.0',\n 'twitter.common.testing>=0.3.1,<0.4.0',\n 'twitter.common.dirutil>=0.3.1,<0.4.0',\n 'pytest',\n ],\n entry_points = {\n 'distutils.commands': [\n 'bdist_pex = pex.commands.bdist_pex:bdist_pex',\n ],\n 'console_scripts': [\n 'pex = pex.bin.pex:main',\n ],\n },\n)\n", "path": "setup.py"}]} | 1,199 | 298 |
gh_patches_debug_4022 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-3030 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use samtranslate `passthrough_metadata` to support ignoring checks on Serverless resources
### Is this feature request related to a new rule or cfn-lint capabilities?
_No response_
### Describe the feature you'd like to request
Based on this: https://github.com/aws/serverless-application-model/pull/2224, it looks like it should be possible to ask SAM to bring all the Metadata stored on the original AWS::Serverless::* resources through to the translated resources. Doing that should allow cfn-lint to check the Metadata section for resource level ignores.
I actually tried changing the parameter myself but couldn't get it working. I must be missing something....
### Describe the solution you'd like
Enable `passthrough_metadata` on the samtranslate `translate` call to bring ALL the metadata through and properly support resource level ignores on serverless resources.
### Additional context
This is where I tried to add the parameter: https://github.com/aws-cloudformation/cfn-lint/blob/main/src/cfnlint/template/transforms/_sam.py#L155
### Is this something that you'd be interested in working on?
- [X] 👋 I may be able to implement this feature request
### Would this feature include a breaking change?
- [ ] ⚠️ This feature might incur a breaking change
</issue>
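A sketch of the change being asked for, shown as a fragment rather than a standalone script (the surrounding names come from `Transform.transform_template` below; the keyword name comes from the samtranslator PR linked above):

```python
# Fragment of Transform.transform_template(); only the extra keyword is new.
self._template = convert_dict(
    sam_translator.translate(
        sam_template=self._template,
        parameter_values=self._parameters,
        passthrough_metadata=True,  # carry Metadata from AWS::Serverless::* through
    )
)
```

With the metadata carried through, cfn-lint's existing resource-level ignore handling (the `cfn-lint` config block under a resource's `Metadata`) can be found on the translated resources as well.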
<code>
[start of src/cfnlint/template/transforms/_sam.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5
6 from __future__ import annotations
7
8 import logging
9 import os
10 from typing import Any
11
12 import samtranslator
13 from samtranslator.parser import parser
14 from samtranslator.public.exceptions import InvalidDocumentException
15 from samtranslator.sdk import resource
16 from samtranslator.translator.translator import Translator
17
18 from cfnlint.data import Serverless
19 from cfnlint.decode.utils import convert_dict
20 from cfnlint.helpers import format_json_string, load_resource
21 from cfnlint.template.transforms._types import TransformResult
22
23 LOGGER = logging.getLogger("cfnlint")
24
25 samtranslator_logger = logging.getLogger("samtranslator")
26 samtranslator_logger.setLevel(logging.CRITICAL)
27
28
29 # Override SAM validation as cfn-lint does thoese
30 # checks already
31 # pylint: disable=unused-argument
32 def valid_override(self):
33 return resource.SamResourceType.has_value(self.type)
34
35
36 # pylint: disable=redefined-outer-name
37 resource.SamResource.valid = valid_override
38
39
40 class Transform:
41 """
42 Application Serverless Module tranform Wrapper.
43 Based on code from AWS SAM CLI:
44 https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py
45 """
46
47 def __init__(self, filename, template, region):
48 """
49 Initialize Transform class
50 """
51 self._filename = filename
52 self._template = template
53 self._region = region
54 self._parameters = {}
55
56 self._managed_policy_map = self.load_managed_policies()
57 self._sam_parser = parser.Parser()
58
59 def template(self):
60 """Get the template"""
61 return self._template
62
63 def load_managed_policies(self):
64 """
65 Load the ManagedPolicies locally, based on the AWS-CLI:
66 https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json
67 """
68 return load_resource(Serverless, "ManagedPolicies.json")
69
70 def _replace_local_codeuri(self):
71 """
72 Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in
73 AWS::Serverless::Api to a fake S3 Uri. This is to support running the
74 SAM Translator with valid values for these fields. If this is not done,
75 the template is invalid in the eyes of SAM Translator (the translator
76 does not support local paths)
77 """
78
79 all_resources = self._template.get("Resources", {})
80
81 template_globals = self._template.get("Globals", {})
82 auto_publish_alias = template_globals.get("Function", {}).get(
83 "AutoPublishAlias"
84 )
85 if isinstance(auto_publish_alias, dict):
86 if len(auto_publish_alias) == 1:
87 for k, v in auto_publish_alias.items():
88 if k == "Ref":
89 if v in self._template.get("Parameters"):
90 self._parameters[v] = "Alias"
91
92 for _, resource in all_resources.items():
93 resource_type = resource.get("Type")
94 resource_dict = resource.get("Properties")
95
96 if resource_type == "AWS::Serverless::Function":
97 if resource_dict.get("PackageType") == "Image":
98 Transform._update_to_s3_uri("ImageUri", resource_dict)
99 else:
100 Transform._update_to_s3_uri("CodeUri", resource_dict)
101 auto_publish_alias = resource_dict.get("AutoPublishAlias")
102 if isinstance(auto_publish_alias, dict):
103 if len(auto_publish_alias) == 1:
104 for k, v in auto_publish_alias.items():
105 if k == "Ref":
106 if v in self._template.get("Parameters"):
107 self._parameters[v] = "Alias"
108 if resource_type in ["AWS::Serverless::LayerVersion"]:
109 if resource_dict.get("ContentUri"):
110 Transform._update_to_s3_uri("ContentUri", resource_dict)
111 if resource_type == "AWS::Serverless::Application":
112 if resource_dict.get("Location"):
113 if isinstance(resource_dict.get("Location"), dict):
114 resource_dict["Location"] = ""
115 Transform._update_to_s3_uri("Location", resource_dict)
116 if resource_type == "AWS::Serverless::Api":
117 if (
118 "DefinitionBody" not in resource_dict
119 and "Auth" not in resource_dict
120 and "Cors" not in resource_dict
121 and "DisableExecuteApiEndpoint" not in resource_dict
122 ):
123 Transform._update_to_s3_uri("DefinitionUri", resource_dict)
124 else:
125 resource_dict["DefinitionBody"] = ""
126 if resource_type == "AWS::Serverless::StateMachine" and resource_dict.get(
127 "DefinitionUri"
128 ):
129 Transform._update_to_s3_uri("DefinitionUri", resource_dict)
130
131 def transform_template(self):
132 """
133 Transform the Template using the Serverless Application Model.
134 """
135 matches = []
136
137 try:
138 # Output the SAM Translator version in debug mode
139 LOGGER.info("SAM Translator: %s", samtranslator.__version__)
140
141 sam_translator = Translator(
142 managed_policy_map=self._managed_policy_map, sam_parser=self._sam_parser
143 )
144
145 self._replace_local_codeuri()
146
147 # Tell SAM to use the region we're linting in, this has to be
148 # controlled using the default AWS mechanisms, see also:
149 # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py
150 LOGGER.info("Setting AWS_DEFAULT_REGION to %s", self._region)
151 os.environ["AWS_DEFAULT_REGION"] = self._region
152
153 self._template = convert_dict(
154 sam_translator.translate(
155 sam_template=self._template, parameter_values=self._parameters
156 )
157 )
158
159 LOGGER.info(
160 "Transformed template: \n%s", format_json_string(self._template)
161 )
162 except InvalidDocumentException as e:
163 # pylint: disable=import-outside-toplevel
164 from cfnlint.match import Match # pylint: disable=cyclic-import
165 from cfnlint.rules import TransformError # pylint: disable=cyclic-import
166
167 message = "Error transforming template: {0}"
168 for cause in e.causes:
169 matches.append(
170 Match(
171 1,
172 1,
173 1,
174 1,
175 self._filename,
176 TransformError(),
177 message.format(cause.message),
178 )
179 )
180 except Exception as e: # pylint: disable=W0703
181 # pylint: disable=import-outside-toplevel
182 from cfnlint.match import Match # pylint: disable=cyclic-import
183 from cfnlint.rules import TransformError # pylint: disable=cyclic-import
184
185 LOGGER.debug("Error transforming template: %s", str(e))
186 LOGGER.debug("Stack trace: %s", e, exc_info=True)
187 message = "Error transforming template: {0}"
188 matches.append(
189 Match(
190 1, 1, 1, 1, self._filename, TransformError(), message.format(str(e))
191 )
192 )
193
194 return matches
195
196 @staticmethod
197 def is_s3_uri(uri):
198 """
199 Checks the uri and determines if it is a valid S3 Uri
200 Parameters
201 ----------
202 uri str, required
203 Uri to check
204 Returns
205 -------
206 bool
207 Returns True if the uri given is an S3 uri, otherwise False
208 """
209 return isinstance(uri, str) and uri.startswith("s3://")
210
211 @staticmethod
212 def _update_to_s3_uri(
213 property_key, resource_property_dict, s3_uri_value="s3://bucket/value"
214 ):
215 """
216 Updates the 'property_key' in the 'resource_property_dict' to the
217 value of 's3_uri_value'
218 Note: The function will mutate the resource_property_dict that is pass
219 in Parameters
220 ----------
221 property_key str, required
222 Key in the resource_property_dict
223 resource_property_dict dict, required
224 Property dictionary of a Resource in the template to replace
225 s3_uri_value str, optional
226 Value to update the value of the property_key to
227 """
228 uri_property = resource_property_dict.get(property_key, ".")
229
230 # ignore if dict or already an S3 Uri
231 if isinstance(uri_property, dict):
232 if len(uri_property) == 1:
233 for k in uri_property.keys():
234 if k in ["Ref", "Fn::Sub"]:
235 resource_property_dict[property_key] = s3_uri_value
236 return
237 if Transform.is_s3_uri(uri_property):
238 return
239
240 resource_property_dict[property_key] = s3_uri_value
241
242
243 def sam(cfn: Any) -> TransformResult:
244 transform = Transform(cfn.filename, cfn.template, cfn.regions[0])
245 matches = transform.transform_template()
246 if matches:
247 return matches, None
248 return matches, transform.template()
249
[end of src/cfnlint/template/transforms/_sam.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/template/transforms/_sam.py b/src/cfnlint/template/transforms/_sam.py
--- a/src/cfnlint/template/transforms/_sam.py
+++ b/src/cfnlint/template/transforms/_sam.py
@@ -152,7 +152,9 @@
self._template = convert_dict(
sam_translator.translate(
- sam_template=self._template, parameter_values=self._parameters
+ sam_template=self._template,
+ parameter_values=self._parameters,
+ passthrough_metadata=True,
)
)
| {"golden_diff": "diff --git a/src/cfnlint/template/transforms/_sam.py b/src/cfnlint/template/transforms/_sam.py\n--- a/src/cfnlint/template/transforms/_sam.py\n+++ b/src/cfnlint/template/transforms/_sam.py\n@@ -152,7 +152,9 @@\n \n self._template = convert_dict(\n sam_translator.translate(\n- sam_template=self._template, parameter_values=self._parameters\n+ sam_template=self._template,\n+ parameter_values=self._parameters,\n+ passthrough_metadata=True,\n )\n )\n", "issue": "Use samtranslate `passthrough_metadata` to support ignoring checks on Serverless resources\n### Is this feature request related to a new rule or cfn-lint capabilities?\n\n_No response_\n\n### Describe the feature you'd like to request\n\nBased on this: https://github.com/aws/serverless-application-model/pull/2224, it looks like it should be possible to ask SAM to bring all the Metadata stored on the original AWS::Serverless::* resources through to the translated resources. Doing that should allow cfn-lint to check the Metadata section for resource level ignores.\r\n\r\nI actually tried changing the parameter myself but couldn't get it working. I must be missing something....\n\n### Describe the solution you'd like\n\nEnable `passthrough_metadata` on the samtranslate `translate` call to bring ALL the metadata through and properly support resource level ignores on serverless resources.\n\n### Additional context\n\nThis is where I tried to add the parameter: https://github.com/aws-cloudformation/cfn-lint/blob/main/src/cfnlint/template/transforms/_sam.py#L155\n\n### Is this something that you'd be interested in working on?\n\n- [X] \ud83d\udc4b I may be able to implement this feature request\n\n### Would this feature include a breaking change?\n\n- [ ] \u26a0\ufe0f This feature might incur a breaking change\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nfrom typing import Any\n\nimport samtranslator\nfrom samtranslator.parser import parser\nfrom samtranslator.public.exceptions import InvalidDocumentException\nfrom samtranslator.sdk import resource\nfrom samtranslator.translator.translator import Translator\n\nfrom cfnlint.data import Serverless\nfrom cfnlint.decode.utils import convert_dict\nfrom cfnlint.helpers import format_json_string, load_resource\nfrom cfnlint.template.transforms._types import TransformResult\n\nLOGGER = logging.getLogger(\"cfnlint\")\n\nsamtranslator_logger = logging.getLogger(\"samtranslator\")\nsamtranslator_logger.setLevel(logging.CRITICAL)\n\n\n# Override SAM validation as cfn-lint does thoese\n# checks already\n# pylint: disable=unused-argument\ndef valid_override(self):\n return resource.SamResourceType.has_value(self.type)\n\n\n# pylint: disable=redefined-outer-name\nresource.SamResource.valid = valid_override\n\n\nclass Transform:\n \"\"\"\n Application Serverless Module tranform Wrapper.\n Based on code from AWS SAM CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py\n \"\"\"\n\n def __init__(self, filename, template, region):\n \"\"\"\n Initialize Transform class\n \"\"\"\n self._filename = filename\n self._template = template\n self._region = region\n self._parameters = {}\n\n self._managed_policy_map = self.load_managed_policies()\n self._sam_parser = parser.Parser()\n\n def template(self):\n \"\"\"Get the template\"\"\"\n return self._template\n\n def load_managed_policies(self):\n \"\"\"\n Load the ManagedPolicies locally, based on the AWS-CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json\n \"\"\"\n return load_resource(Serverless, \"ManagedPolicies.json\")\n\n def _replace_local_codeuri(self):\n \"\"\"\n Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in\n AWS::Serverless::Api to a fake S3 Uri. This is to support running the\n SAM Translator with valid values for these fields. 
If this is not done,\n the template is invalid in the eyes of SAM Translator (the translator\n does not support local paths)\n \"\"\"\n\n all_resources = self._template.get(\"Resources\", {})\n\n template_globals = self._template.get(\"Globals\", {})\n auto_publish_alias = template_globals.get(\"Function\", {}).get(\n \"AutoPublishAlias\"\n )\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == \"Ref\":\n if v in self._template.get(\"Parameters\"):\n self._parameters[v] = \"Alias\"\n\n for _, resource in all_resources.items():\n resource_type = resource.get(\"Type\")\n resource_dict = resource.get(\"Properties\")\n\n if resource_type == \"AWS::Serverless::Function\":\n if resource_dict.get(\"PackageType\") == \"Image\":\n Transform._update_to_s3_uri(\"ImageUri\", resource_dict)\n else:\n Transform._update_to_s3_uri(\"CodeUri\", resource_dict)\n auto_publish_alias = resource_dict.get(\"AutoPublishAlias\")\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == \"Ref\":\n if v in self._template.get(\"Parameters\"):\n self._parameters[v] = \"Alias\"\n if resource_type in [\"AWS::Serverless::LayerVersion\"]:\n if resource_dict.get(\"ContentUri\"):\n Transform._update_to_s3_uri(\"ContentUri\", resource_dict)\n if resource_type == \"AWS::Serverless::Application\":\n if resource_dict.get(\"Location\"):\n if isinstance(resource_dict.get(\"Location\"), dict):\n resource_dict[\"Location\"] = \"\"\n Transform._update_to_s3_uri(\"Location\", resource_dict)\n if resource_type == \"AWS::Serverless::Api\":\n if (\n \"DefinitionBody\" not in resource_dict\n and \"Auth\" not in resource_dict\n and \"Cors\" not in resource_dict\n and \"DisableExecuteApiEndpoint\" not in resource_dict\n ):\n Transform._update_to_s3_uri(\"DefinitionUri\", resource_dict)\n else:\n resource_dict[\"DefinitionBody\"] = \"\"\n if resource_type == \"AWS::Serverless::StateMachine\" and resource_dict.get(\n \"DefinitionUri\"\n ):\n Transform._update_to_s3_uri(\"DefinitionUri\", resource_dict)\n\n def transform_template(self):\n \"\"\"\n Transform the Template using the Serverless Application Model.\n \"\"\"\n matches = []\n\n try:\n # Output the SAM Translator version in debug mode\n LOGGER.info(\"SAM Translator: %s\", samtranslator.__version__)\n\n sam_translator = Translator(\n managed_policy_map=self._managed_policy_map, sam_parser=self._sam_parser\n )\n\n self._replace_local_codeuri()\n\n # Tell SAM to use the region we're linting in, this has to be\n # controlled using the default AWS mechanisms, see also:\n # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py\n LOGGER.info(\"Setting AWS_DEFAULT_REGION to %s\", self._region)\n os.environ[\"AWS_DEFAULT_REGION\"] = self._region\n\n self._template = convert_dict(\n sam_translator.translate(\n sam_template=self._template, parameter_values=self._parameters\n )\n )\n\n LOGGER.info(\n \"Transformed template: \\n%s\", format_json_string(self._template)\n )\n except InvalidDocumentException as e:\n # pylint: disable=import-outside-toplevel\n from cfnlint.match import Match # pylint: disable=cyclic-import\n from cfnlint.rules import TransformError # pylint: disable=cyclic-import\n\n message = \"Error transforming template: {0}\"\n for cause in e.causes:\n matches.append(\n Match(\n 1,\n 1,\n 1,\n 1,\n self._filename,\n TransformError(),\n message.format(cause.message),\n )\n )\n 
except Exception as e: # pylint: disable=W0703\n # pylint: disable=import-outside-toplevel\n from cfnlint.match import Match # pylint: disable=cyclic-import\n from cfnlint.rules import TransformError # pylint: disable=cyclic-import\n\n LOGGER.debug(\"Error transforming template: %s\", str(e))\n LOGGER.debug(\"Stack trace: %s\", e, exc_info=True)\n message = \"Error transforming template: {0}\"\n matches.append(\n Match(\n 1, 1, 1, 1, self._filename, TransformError(), message.format(str(e))\n )\n )\n\n return matches\n\n @staticmethod\n def is_s3_uri(uri):\n \"\"\"\n Checks the uri and determines if it is a valid S3 Uri\n Parameters\n ----------\n uri str, required\n Uri to check\n Returns\n -------\n bool\n Returns True if the uri given is an S3 uri, otherwise False\n \"\"\"\n return isinstance(uri, str) and uri.startswith(\"s3://\")\n\n @staticmethod\n def _update_to_s3_uri(\n property_key, resource_property_dict, s3_uri_value=\"s3://bucket/value\"\n ):\n \"\"\"\n Updates the 'property_key' in the 'resource_property_dict' to the\n value of 's3_uri_value'\n Note: The function will mutate the resource_property_dict that is pass\n in Parameters\n ----------\n property_key str, required\n Key in the resource_property_dict\n resource_property_dict dict, required\n Property dictionary of a Resource in the template to replace\n s3_uri_value str, optional\n Value to update the value of the property_key to\n \"\"\"\n uri_property = resource_property_dict.get(property_key, \".\")\n\n # ignore if dict or already an S3 Uri\n if isinstance(uri_property, dict):\n if len(uri_property) == 1:\n for k in uri_property.keys():\n if k in [\"Ref\", \"Fn::Sub\"]:\n resource_property_dict[property_key] = s3_uri_value\n return\n if Transform.is_s3_uri(uri_property):\n return\n\n resource_property_dict[property_key] = s3_uri_value\n\n\ndef sam(cfn: Any) -> TransformResult:\n transform = Transform(cfn.filename, cfn.template, cfn.regions[0])\n matches = transform.transform_template()\n if matches:\n return matches, None\n return matches, transform.template()\n", "path": "src/cfnlint/template/transforms/_sam.py"}]} | 3,372 | 122 |
gh_patches_debug_13814 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1043 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Permission denied when installing this environment
Hello.
I'm not sure whether the permission error is caused by pre-commit, but it looks like it is.
Here is my config
````yaml
- repo: git://github.com/dnephin/pre-commit-golang
rev: master
hooks:
- id: go-fmt
- repo: git://github.com/golangci/golangci-lint
rev: master
hooks:
- id: golangci-lint
args:
- --config .golangci.yml
- repo: git://github.com/detailyang/pre-commit-shell
rev: v1.0.6
hooks:
- id: shell-lint
args: [--color=always, "--exclude=SC1090,SC1091,SC2206"]
````
It looks like Python 3.7's `shutil.rmtree` raises the error as follows:
````python
An unexpected error has occurred: PermissionError: [Errno 13] Permission denied: '/Users/detailyang/.cache/pre-commit/repo2ba1f3b5/golangenv-default/pkg/mod/github.com/!burnt!sushi/[email protected]/.gitignore'
Traceback (most recent call last):
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 447, in _rmtree_safe_fd
os.unlink(entry.name, dir_fd=topfd)
PermissionError: [Errno 13] Permission denied: '.gitignore'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/pre_commit/error_handler.py", line 46, in error_handler
yield
File "/usr/local/lib/python3.7/site-packages/pre_commit/main.py", line 294, in main
return run(args.config, store, args)
File "/usr/local/lib/python3.7/site-packages/pre_commit/commands/run.py", line 285, in run
install_hook_envs(hooks, store)
File "/usr/local/lib/python3.7/site-packages/pre_commit/repository.py", line 215, in install_hook_envs
hook.install()
File "/usr/local/lib/python3.7/site-packages/pre_commit/repository.py", line 90, in install
rmtree(self.prefix.path(venv))
File "/usr/local/lib/python3.7/site-packages/pre_commit/util.py", line 171, in rmtree
shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 491, in rmtree
_rmtree_safe_fd(fd, path, onerror)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 429, in _rmtree_safe_fd
_rmtree_safe_fd(dirfd, fullname, onerror)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 429, in _rmtree_safe_fd
_rmtree_safe_fd(dirfd, fullname, onerror)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 429, in _rmtree_safe_fd
_rmtree_safe_fd(dirfd, fullname, onerror)
[Previous line repeated 2 more times]
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py", line 449, in _rmtree_safe_fd
onerror(os.unlink, fullname, sys.exc_info())
File "/usr/local/lib/python3.7/site-packages/pre_commit/util.py", line 168, in handle_remove_readonly
func(path)
PermissionError: [Errno 13] Permission denied: '/Users/detailyang/.cache/pre-commit/repo2ba1f3b5/golangenv-default/pkg/mod/github.com/!burnt!sushi/[email protected]/.gitignore'
````
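For reference, the failure mode can be reproduced outside pre-commit: Go's module cache marks its directories read-only, and `shutil.rmtree`'s `onerror` hook cannot recover by chmod-ing only the failing file, because the unlink is denied by the permissions of the containing directory. A minimal sketch of this (paths, modes, and the simplified handler are illustrative, assuming a non-root user):

```python
import os
import shutil
import stat
import tempfile

root = tempfile.mkdtemp()
pkg = os.path.join(root, "mod")
os.mkdir(pkg)
with open(os.path.join(pkg, ".gitignore"), "w") as f:
    f.write("*\n")
os.chmod(pkg, 0o555)  # read-only directory, like the Go module cache leaves behind


def handle_remove_readonly(func, path, exc):
    # Simplified version of pre-commit's handler: chmod the failing path and retry.
    # The file becomes writable, but its parent directory still forbids the unlink.
    os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
    func(path)


try:
    shutil.rmtree(root, onerror=handle_remove_readonly)
except PermissionError as exc:
    print("still fails:", exc)
finally:
    os.chmod(pkg, 0o755)  # make the directory writable again so cleanup works
    shutil.rmtree(root, ignore_errors=True)
```

Restoring write permission on the containing directory as well is what would let the retried `func(path)` succeed.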
</issue>
<code>
[start of pre_commit/util.py]
1 from __future__ import unicode_literals
2
3 import contextlib
4 import errno
5 import os.path
6 import shutil
7 import stat
8 import subprocess
9 import sys
10 import tempfile
11
12 import six
13
14 from pre_commit import five
15 from pre_commit import parse_shebang
16
17 if sys.version_info >= (3, 7): # pragma: no cover (PY37+)
18 from importlib.resources import open_binary
19 from importlib.resources import read_text
20 else: # pragma: no cover (<PY37)
21 from importlib_resources import open_binary
22 from importlib_resources import read_text
23
24
25 def mkdirp(path):
26 try:
27 os.makedirs(path)
28 except OSError:
29 if not os.path.exists(path):
30 raise
31
32
33 @contextlib.contextmanager
34 def clean_path_on_failure(path):
35 """Cleans up the directory on an exceptional failure."""
36 try:
37 yield
38 except BaseException:
39 if os.path.exists(path):
40 rmtree(path)
41 raise
42
43
44 @contextlib.contextmanager
45 def noop_context():
46 yield
47
48
49 @contextlib.contextmanager
50 def tmpdir():
51 """Contextmanager to create a temporary directory. It will be cleaned up
52 afterwards.
53 """
54 tempdir = tempfile.mkdtemp()
55 try:
56 yield tempdir
57 finally:
58 rmtree(tempdir)
59
60
61 def resource_bytesio(filename):
62 return open_binary('pre_commit.resources', filename)
63
64
65 def resource_text(filename):
66 return read_text('pre_commit.resources', filename)
67
68
69 def make_executable(filename):
70 original_mode = os.stat(filename).st_mode
71 os.chmod(
72 filename, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,
73 )
74
75
76 class CalledProcessError(RuntimeError):
77 def __init__(self, returncode, cmd, expected_returncode, output=None):
78 super(CalledProcessError, self).__init__(
79 returncode, cmd, expected_returncode, output,
80 )
81 self.returncode = returncode
82 self.cmd = cmd
83 self.expected_returncode = expected_returncode
84 self.output = output
85
86 def to_bytes(self):
87 output = []
88 for maybe_text in self.output:
89 if maybe_text:
90 output.append(
91 b'\n ' +
92 five.to_bytes(maybe_text).replace(b'\n', b'\n '),
93 )
94 else:
95 output.append(b'(none)')
96
97 return b''.join((
98 five.to_bytes(
99 'Command: {!r}\n'
100 'Return code: {}\n'
101 'Expected return code: {}\n'.format(
102 self.cmd, self.returncode, self.expected_returncode,
103 ),
104 ),
105 b'Output: ', output[0], b'\n',
106 b'Errors: ', output[1], b'\n',
107 ))
108
109 def to_text(self):
110 return self.to_bytes().decode('UTF-8')
111
112 if six.PY2: # pragma: no cover (py2)
113 __str__ = to_bytes
114 __unicode__ = to_text
115 else: # pragma: no cover (py3)
116 __bytes__ = to_bytes
117 __str__ = to_text
118
119
120 def cmd_output(*cmd, **kwargs):
121 retcode = kwargs.pop('retcode', 0)
122 encoding = kwargs.pop('encoding', 'UTF-8')
123
124 popen_kwargs = {
125 'stdin': subprocess.PIPE,
126 'stdout': subprocess.PIPE,
127 'stderr': subprocess.PIPE,
128 }
129
130 # py2/py3 on windows are more strict about the types here
131 cmd = tuple(five.n(arg) for arg in cmd)
132 kwargs['env'] = {
133 five.n(key): five.n(value)
134 for key, value in kwargs.pop('env', {}).items()
135 } or None
136
137 try:
138 cmd = parse_shebang.normalize_cmd(cmd)
139 except parse_shebang.ExecutableNotFoundError as e:
140 returncode, stdout, stderr = e.to_output()
141 else:
142 popen_kwargs.update(kwargs)
143 proc = subprocess.Popen(cmd, **popen_kwargs)
144 stdout, stderr = proc.communicate()
145 returncode = proc.returncode
146 if encoding is not None and stdout is not None:
147 stdout = stdout.decode(encoding)
148 if encoding is not None and stderr is not None:
149 stderr = stderr.decode(encoding)
150
151 if retcode is not None and retcode != returncode:
152 raise CalledProcessError(
153 returncode, cmd, retcode, output=(stdout, stderr),
154 )
155
156 return returncode, stdout, stderr
157
158
159 def rmtree(path):
160 """On windows, rmtree fails for readonly dirs."""
161 def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)
162 excvalue = exc[1]
163 if (
164 func in (os.rmdir, os.remove, os.unlink) and
165 excvalue.errno == errno.EACCES
166 ):
167 os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
168 func(path)
169 else:
170 raise
171 shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
172
173
174 def parse_version(s):
175 """poor man's version comparison"""
176 return tuple(int(p) for p in s.split('.'))
177
[end of pre_commit/util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/util.py b/pre_commit/util.py
--- a/pre_commit/util.py
+++ b/pre_commit/util.py
@@ -158,13 +158,14 @@
def rmtree(path):
"""On windows, rmtree fails for readonly dirs."""
- def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)
+ def handle_remove_readonly(func, path, exc):
excvalue = exc[1]
if (
func in (os.rmdir, os.remove, os.unlink) and
excvalue.errno == errno.EACCES
):
- os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
+ for p in (path, os.path.dirname(path)):
+ os.chmod(p, os.stat(p).st_mode | stat.S_IWUSR)
func(path)
else:
raise
| {"golden_diff": "diff --git a/pre_commit/util.py b/pre_commit/util.py\n--- a/pre_commit/util.py\n+++ b/pre_commit/util.py\n@@ -158,13 +158,14 @@\n \n def rmtree(path):\n \"\"\"On windows, rmtree fails for readonly dirs.\"\"\"\n- def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)\n+ def handle_remove_readonly(func, path, exc):\n excvalue = exc[1]\n if (\n func in (os.rmdir, os.remove, os.unlink) and\n excvalue.errno == errno.EACCES\n ):\n- os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n+ for p in (path, os.path.dirname(path)):\n+ os.chmod(p, os.stat(p).st_mode | stat.S_IWUSR)\n func(path)\n else:\n raise\n", "issue": "Permission denied when installed this environment\nHello.\r\n\r\nI'm not sure the permission is about the pre-commit but it looks likes so.\r\n\r\nHere is my config\r\n\r\n````yaml\r\n- repo: git://github.com/dnephin/pre-commit-golang\r\n rev: master\r\n hooks:\r\n - id: go-fmt\r\n\r\n- repo: git://github.com/golangci/golangci-lint\r\n rev: master\r\n hooks:\r\n - id: golangci-lint\r\n args:\r\n - --config .golangci.yml\r\n\r\n- repo: git://github.com/detailyang/pre-commit-shell\r\n rev: v1.0.6\r\n hooks:\r\n - id: shell-lint\r\n args: [--color=always, \"--exclude=SC1090,SC1091,SC2206\"]\r\n````\r\n\r\nit looks like python3.7 shutil.rmtree raise the erorr as the following:\r\n````python\r\nAn unexpected error has occurred: PermissionError: [Errno 13] Permission denied: '/Users/detailyang/.cache/pre-commit/repo2ba1f3b5/golangenv-default/pkg/mod/github.com/!burnt!sushi/[email protected]/.gitignore'\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py\", line 447, in _rmtree_safe_fd\r\n os.unlink(entry.name, dir_fd=topfd)\r\nPermissionError: [Errno 13] Permission denied: '.gitignore'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/pre_commit/error_handler.py\", line 46, in error_handler\r\n yield\r\n File \"/usr/local/lib/python3.7/site-packages/pre_commit/main.py\", line 294, in main\r\n return run(args.config, store, args)\r\n File \"/usr/local/lib/python3.7/site-packages/pre_commit/commands/run.py\", line 285, in run\r\n install_hook_envs(hooks, store)\r\n File \"/usr/local/lib/python3.7/site-packages/pre_commit/repository.py\", line 215, in install_hook_envs\r\n hook.install()\r\n File \"/usr/local/lib/python3.7/site-packages/pre_commit/repository.py\", line 90, in install\r\n rmtree(self.prefix.path(venv))\r\n File \"/usr/local/lib/python3.7/site-packages/pre_commit/util.py\", line 171, in rmtree\r\n shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)\r\n File \"/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py\", line 491, in rmtree\r\n _rmtree_safe_fd(fd, path, onerror)\r\n File \"/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py\", line 429, in _rmtree_safe_fd\r\n _rmtree_safe_fd(dirfd, fullname, onerror)\r\n File \"/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py\", line 429, in _rmtree_safe_fd\r\n _rmtree_safe_fd(dirfd, fullname, onerror)\r\n File \"/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py\", line 429, in _rmtree_safe_fd\r\n _rmtree_safe_fd(dirfd, fullname, onerror)\r\n [Previous line repeated 2 more times]\r\n 
File \"/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/shutil.py\", line 449, in _rmtree_safe_fd\r\n onerror(os.unlink, fullname, sys.exc_info())\r\n File \"/usr/local/lib/python3.7/site-packages/pre_commit/util.py\", line 168, in handle_remove_readonly\r\n func(path)\r\nPermissionError: [Errno 13] Permission denied: '/Users/detailyang/.cache/pre-commit/repo2ba1f3b5/golangenv-default/pkg/mod/github.com/!burnt!sushi/[email protected]/.gitignore'\r\n````\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport errno\nimport os.path\nimport shutil\nimport stat\nimport subprocess\nimport sys\nimport tempfile\n\nimport six\n\nfrom pre_commit import five\nfrom pre_commit import parse_shebang\n\nif sys.version_info >= (3, 7): # pragma: no cover (PY37+)\n from importlib.resources import open_binary\n from importlib.resources import read_text\nelse: # pragma: no cover (<PY37)\n from importlib_resources import open_binary\n from importlib_resources import read_text\n\n\ndef mkdirp(path):\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.exists(path):\n raise\n\n\[email protected]\ndef clean_path_on_failure(path):\n \"\"\"Cleans up the directory on an exceptional failure.\"\"\"\n try:\n yield\n except BaseException:\n if os.path.exists(path):\n rmtree(path)\n raise\n\n\[email protected]\ndef noop_context():\n yield\n\n\[email protected]\ndef tmpdir():\n \"\"\"Contextmanager to create a temporary directory. It will be cleaned up\n afterwards.\n \"\"\"\n tempdir = tempfile.mkdtemp()\n try:\n yield tempdir\n finally:\n rmtree(tempdir)\n\n\ndef resource_bytesio(filename):\n return open_binary('pre_commit.resources', filename)\n\n\ndef resource_text(filename):\n return read_text('pre_commit.resources', filename)\n\n\ndef make_executable(filename):\n original_mode = os.stat(filename).st_mode\n os.chmod(\n filename, original_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH,\n )\n\n\nclass CalledProcessError(RuntimeError):\n def __init__(self, returncode, cmd, expected_returncode, output=None):\n super(CalledProcessError, self).__init__(\n returncode, cmd, expected_returncode, output,\n )\n self.returncode = returncode\n self.cmd = cmd\n self.expected_returncode = expected_returncode\n self.output = output\n\n def to_bytes(self):\n output = []\n for maybe_text in self.output:\n if maybe_text:\n output.append(\n b'\\n ' +\n five.to_bytes(maybe_text).replace(b'\\n', b'\\n '),\n )\n else:\n output.append(b'(none)')\n\n return b''.join((\n five.to_bytes(\n 'Command: {!r}\\n'\n 'Return code: {}\\n'\n 'Expected return code: {}\\n'.format(\n self.cmd, self.returncode, self.expected_returncode,\n ),\n ),\n b'Output: ', output[0], b'\\n',\n b'Errors: ', output[1], b'\\n',\n ))\n\n def to_text(self):\n return self.to_bytes().decode('UTF-8')\n\n if six.PY2: # pragma: no cover (py2)\n __str__ = to_bytes\n __unicode__ = to_text\n else: # pragma: no cover (py3)\n __bytes__ = to_bytes\n __str__ = to_text\n\n\ndef cmd_output(*cmd, **kwargs):\n retcode = kwargs.pop('retcode', 0)\n encoding = kwargs.pop('encoding', 'UTF-8')\n\n popen_kwargs = {\n 'stdin': subprocess.PIPE,\n 'stdout': subprocess.PIPE,\n 'stderr': subprocess.PIPE,\n }\n\n # py2/py3 on windows are more strict about the types here\n cmd = tuple(five.n(arg) for arg in cmd)\n kwargs['env'] = {\n five.n(key): five.n(value)\n for key, value in kwargs.pop('env', {}).items()\n } or None\n\n try:\n cmd = parse_shebang.normalize_cmd(cmd)\n except 
parse_shebang.ExecutableNotFoundError as e:\n returncode, stdout, stderr = e.to_output()\n else:\n popen_kwargs.update(kwargs)\n proc = subprocess.Popen(cmd, **popen_kwargs)\n stdout, stderr = proc.communicate()\n returncode = proc.returncode\n if encoding is not None and stdout is not None:\n stdout = stdout.decode(encoding)\n if encoding is not None and stderr is not None:\n stderr = stderr.decode(encoding)\n\n if retcode is not None and retcode != returncode:\n raise CalledProcessError(\n returncode, cmd, retcode, output=(stdout, stderr),\n )\n\n return returncode, stdout, stderr\n\n\ndef rmtree(path):\n \"\"\"On windows, rmtree fails for readonly dirs.\"\"\"\n def handle_remove_readonly(func, path, exc): # pragma: no cover (windows)\n excvalue = exc[1]\n if (\n func in (os.rmdir, os.remove, os.unlink) and\n excvalue.errno == errno.EACCES\n ):\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)\n func(path)\n else:\n raise\n shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)\n\n\ndef parse_version(s):\n \"\"\"poor man's version comparison\"\"\"\n return tuple(int(p) for p in s.split('.'))\n", "path": "pre_commit/util.py"}]} | 3,098 | 204 |
gh_patches_debug_8079 | rasdani/github-patches | git_diff | apache__airflow-16601 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Airflow logging secrets masker assumes dict_key is type `str`
**Apache Airflow version**: 2.1.0
**What happened**:
Airflow logging assumes dict_key is of type `str`
```
logging.info("Dictionary where key is int type: %s", modified_table_mapping)
File "/usr/lib64/python3.6/logging/__init__.py", line 1902, in info
root.info(msg, *args, **kwargs)
File "/usr/lib64/python3.6/logging/__init__.py", line 1308, in info
self._log(INFO, msg, args, **kwargs)
File "/usr/lib64/python3.6/logging/__init__.py", line 1444, in _log
self.handle(record)
File "/usr/lib64/python3.6/logging/__init__.py", line 1453, in handle
if (not self.disabled) and self.filter(record):
File "/usr/lib64/python3.6/logging/__init__.py", line 720, in filter
result = f.filter(record)
File "/bb/bin/airflow_env/lib/python3.6/site-packages/airflow/utils/log/secrets_masker.py", line 157, in filter
record.__dict__[k] = self.redact(v)
File "/bb/bin/airflow_env/lib/python3.6/site-packages/airflow/utils/log/secrets_masker.py", line 193, in redact
return {dict_key: self.redact(subval, dict_key) for dict_key, subval in item.items()}
File "/bb/bin/airflow_env/lib/python3.6/site-packages/airflow/utils/log/secrets_masker.py", line 193, in <dictcomp>
return {dict_key: self.redact(subval, dict_key) for dict_key, subval in item.items()}
File "/bb/bin/airflow_env/lib/python3.6/site-packages/airflow/utils/log/secrets_masker.py", line 189, in redact
if name and should_hide_value_for_key(name):
File "/bb/bin/airflow_env/lib/python3.6/site-packages/airflow/utils/log/secrets_masker.py", line 74, in should_hide_value_for_key
name = name.strip().lower()
AttributeError: 'int' object has no attribute 'strip'
```
**How to reproduce it**:
Define a dictionary whose keys are of type `int` and log it in any Airflow task.
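A minimal sketch of such a task (the mapping contents and the callable are made up for illustration; the error only surfaces when the record passes through the task logger's `SecretsMasker` filter):

```python
import logging


def my_task_callable(**context):
    # Int keys are what trip should_hide_value_for_key(), which assumes str keys.
    modified_table_mapping = {1001: "users_table", 1002: "orders_table"}
    logging.info("Dictionary where key is int type: %s", modified_table_mapping)
```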
</issue>
<code>
[start of airflow/utils/log/secrets_masker.py]
1 # Licensed to the Apache Software Foundation (ASF) under one
2 # or more contributor license agreements. See the NOTICE file
3 # distributed with this work for additional information
4 # regarding copyright ownership. The ASF licenses this file
5 # to you under the Apache License, Version 2.0 (the
6 # "License"); you may not use this file except in compliance
7 # with the License. You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing,
12 # software distributed under the License is distributed on an
13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
14 # KIND, either express or implied. See the License for the
15 # specific language governing permissions and limitations
16 # under the License.
17 """Mask sensitive information from logs"""
18 import collections
19 import logging
20 import re
21 from typing import TYPE_CHECKING, Iterable, Optional, Set, TypeVar, Union
22
23 from airflow.compat.functools import cache, cached_property
24
25 if TYPE_CHECKING:
26 from airflow.typing_compat import RePatternType
27
28 RedactableItem = TypeVar('RedactableItem')
29
30
31 log = logging.getLogger(__name__)
32
33
34 DEFAULT_SENSITIVE_FIELDS = frozenset(
35 {
36 'access_token',
37 'api_key',
38 'apikey',
39 'authorization',
40 'passphrase',
41 'passwd',
42 'password',
43 'private_key',
44 'secret',
45 }
46 )
47 """Names of fields (Connection extra, Variable key name etc.) that are deemed sensitive"""
48
49
50 @cache
51 def get_sensitive_variables_fields():
52 """Get comma-separated sensitive Variable Fields from airflow.cfg."""
53 from airflow.configuration import conf
54
55 sensitive_fields = DEFAULT_SENSITIVE_FIELDS.copy()
56 sensitive_variable_fields = conf.get('core', 'sensitive_var_conn_names')
57 if sensitive_variable_fields:
58 sensitive_fields |= frozenset({field.strip() for field in sensitive_variable_fields.split(',')})
59 return sensitive_fields
60
61
62 def should_hide_value_for_key(name):
63 """Should the value for this given name (Variable name, or key in conn.extra_dejson) be hidden"""
64 from airflow import settings
65
66 if name and settings.HIDE_SENSITIVE_VAR_CONN_FIELDS:
67 name = name.strip().lower()
68 return any(s in name for s in get_sensitive_variables_fields())
69 return False
70
71
72 def mask_secret(secret: Union[str, dict, Iterable], name: str = None) -> None:
73 """
74 Mask a secret from appearing in the task logs.
75
76 If ``name`` is provided, then it will only be masked if the name matches
77 one of the configured "sensitive" names.
78
79 If ``secret`` is a dict or a iterable (excluding str) then it will be
80 recursively walked and keys with sensitive names will be hidden.
81 """
82 # Delay import
83 from airflow import settings
84
85 # Filtering all log messages is not a free process, so we only do it when
86 # running tasks
87 if not settings.MASK_SECRETS_IN_LOGS or not secret:
88 return
89
90 _secrets_masker().add_mask(secret, name)
91
92
93 def redact(value: "RedactableItem", name: str = None) -> "RedactableItem":
94 """Redact any secrets found in ``value``."""
95 return _secrets_masker().redact(value, name)
96
97
98 @cache
99 def _secrets_masker() -> "SecretsMasker":
100
101 for flt in logging.getLogger('airflow.task').filters:
102 if isinstance(flt, SecretsMasker):
103 return flt
104 raise RuntimeError("No SecretsMasker found!")
105
106
107 class SecretsMasker(logging.Filter):
108 """Redact secrets from logs"""
109
110 replacer: Optional["RePatternType"] = None
111 patterns: Set[str]
112
113 ALREADY_FILTERED_FLAG = "__SecretsMasker_filtered"
114 MAX_RECURSION_DEPTH = 5
115
116 def __init__(self):
117 super().__init__()
118 self.patterns = set()
119
120 @cached_property
121 def _record_attrs_to_ignore(self) -> Iterable[str]:
122 # Doing log.info(..., extra={'foo': 2}) sets extra properties on
123 # record, i.e. record.foo. And we need to filter those too. Fun
124 #
125 # Create a record, and look at what attributes are on it, and ignore
126 # all the default ones!
127
128 record = logging.getLogRecordFactory()(
129 # name, level, pathname, lineno, msg, args, exc_info, func=None, sinfo=None,
130 "x",
131 logging.INFO,
132 __file__,
133 1,
134 "",
135 tuple(),
136 exc_info=None,
137 func="funcname",
138 )
139 return frozenset(record.__dict__).difference({'msg', 'args'})
140
141 def filter(self, record) -> bool:
142 if self.ALREADY_FILTERED_FLAG in record.__dict__:
143 # Filters are attached to multiple handlers and logs, keep a
144 # "private" flag that stops us needing to process it more than once
145 return True
146
147 if self.replacer:
148 for k, v in record.__dict__.items():
149 if k in self._record_attrs_to_ignore:
150 continue
151 record.__dict__[k] = self.redact(v)
152 if record.exc_info and record.exc_info[1] is not None:
153 exc = record.exc_info[1]
154 # I'm not sure if this is a good idea!
155 exc.args = (self.redact(v) for v in exc.args)
156 record.__dict__[self.ALREADY_FILTERED_FLAG] = True
157
158 return True
159
160 def _redact_all(self, item: "RedactableItem", depth: int) -> "RedactableItem":
161 if depth > self.MAX_RECURSION_DEPTH or isinstance(item, str):
162 return '***'
163 if isinstance(item, dict):
164 return {dict_key: self._redact_all(subval, depth + 1) for dict_key, subval in item.items()}
165 elif isinstance(item, (tuple, set)):
166 # Turn set in to tuple!
167 return tuple(self._redact_all(subval, depth + 1) for subval in item)
168 elif isinstance(item, list):
169 return list(self._redact_all(subval, depth + 1) for subval in item)
170 else:
171 return item
172
173 # pylint: disable=too-many-return-statements
174 def _redact(self, item: "RedactableItem", name: Optional[str], depth: int) -> "RedactableItem":
175 # Avoid spending too much effort on redacting on deeply nested
176 # structures. This also avoid infinite recursion if a structure has
177 # reference to self.
178 if depth > self.MAX_RECURSION_DEPTH:
179 return item
180 try:
181 if name and should_hide_value_for_key(name):
182 return self._redact_all(item, depth)
183 if isinstance(item, dict):
184 return {
185 dict_key: self._redact(subval, name=dict_key, depth=(depth + 1))
186 for dict_key, subval in item.items()
187 }
188 elif isinstance(item, str):
189 if self.replacer:
190 # We can't replace specific values, but the key-based redacting
191 # can still happen, so we can't short-circuit, we need to walk
192 # the structure.
193 return self.replacer.sub('***', item)
194 return item
195 elif isinstance(item, (tuple, set)):
196 # Turn set in to tuple!
197 return tuple(self._redact(subval, name=None, depth=(depth + 1)) for subval in item)
198 elif isinstance(item, list):
199 return [self._redact(subval, name=None, depth=(depth + 1)) for subval in item]
200 else:
201 return item
202 # I think this should never happen, but it does not hurt to leave it just in case
203 except Exception as e: # pylint: disable=broad-except
204 log.warning(
205 "Unable to redact %r, please report this via <https://github.com/apache/airflow/issues>. "
206 "Error was: %s: %s",
207 item,
208 type(e).__name__,
209 str(e),
210 )
211 return item
212
213 def redact(self, item: "RedactableItem", name: Optional[str] = None) -> "RedactableItem":
214 """Redact an any secrets found in ``item``, if it is a string.
215
216 If ``name`` is given, and it's a "sensitive" name (see
217 :func:`should_hide_value_for_key`) then all string values in the item
218 is redacted.
219 """
220 return self._redact(item, name, depth=0)
221
222 # pylint: enable=too-many-return-statements
223 def add_mask(self, secret: Union[str, dict, Iterable], name: str = None):
224 """Add a new secret to be masked to this filter instance."""
225 if isinstance(secret, dict):
226 for k, v in secret.items():
227 self.add_mask(v, k)
228 elif isinstance(secret, str):
229 if not secret:
230 return
231 pattern = re.escape(secret)
232 if pattern not in self.patterns and (not name or should_hide_value_for_key(name)):
233 self.patterns.add(pattern)
234 self.replacer = re.compile('|'.join(self.patterns))
235 elif isinstance(secret, collections.abc.Iterable):
236 for v in secret:
237 self.add_mask(v, name)
238
[end of airflow/utils/log/secrets_masker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/airflow/utils/log/secrets_masker.py b/airflow/utils/log/secrets_masker.py
--- a/airflow/utils/log/secrets_masker.py
+++ b/airflow/utils/log/secrets_masker.py
@@ -63,7 +63,7 @@
"""Should the value for this given name (Variable name, or key in conn.extra_dejson) be hidden"""
from airflow import settings
- if name and settings.HIDE_SENSITIVE_VAR_CONN_FIELDS:
+ if isinstance(name, str) and settings.HIDE_SENSITIVE_VAR_CONN_FIELDS:
name = name.strip().lower()
return any(s in name for s in get_sensitive_variables_fields())
return False
| {"golden_diff": "diff --git a/airflow/utils/log/secrets_masker.py b/airflow/utils/log/secrets_masker.py\n--- a/airflow/utils/log/secrets_masker.py\n+++ b/airflow/utils/log/secrets_masker.py\n@@ -63,7 +63,7 @@\n \"\"\"Should the value for this given name (Variable name, or key in conn.extra_dejson) be hidden\"\"\"\n from airflow import settings\n \n- if name and settings.HIDE_SENSITIVE_VAR_CONN_FIELDS:\n+ if isinstance(name, str) and settings.HIDE_SENSITIVE_VAR_CONN_FIELDS:\n name = name.strip().lower()\n return any(s in name for s in get_sensitive_variables_fields())\n return False\n", "issue": "Airflow logging secrets masker assumes dict_key is type `str`\n**Apache Airflow version**: 2.1.0\r\n\r\n**What happened**:\r\nAirflow logging assume dict_key is type `str`\r\n```\r\n logging.info(\"Dictionary where key is int type: %s\", modified_table_mapping)\r\n File \"/usr/lib64/python3.6/logging/__init__.py\", line 1902, in info\r\n root.info(msg, *args, **kwargs)\r\n File \"/usr/lib64/python3.6/logging/__init__.py\", line 1308, in info\r\n self._log(INFO, msg, args, **kwargs)\r\n File \"/usr/lib64/python3.6/logging/__init__.py\", line 1444, in _log\r\n self.handle(record)\r\n File \"/usr/lib64/python3.6/logging/__init__.py\", line 1453, in handle\r\n if (not self.disabled) and self.filter(record):\r\n File \"/usr/lib64/python3.6/logging/__init__.py\", line 720, in filter\r\n result = f.filter(record)\r\n File \"/bb/bin/airflow_env/lib/python3.6/site-packages/airflow/utils/log/secrets_masker.py\", line 157, in filter\r\n record.__dict__[k] = self.redact(v)\r\n File \"/bb/bin/airflow_env/lib/python3.6/site-packages/airflow/utils/log/secrets_masker.py\", line 193, in redact\r\n return {dict_key: self.redact(subval, dict_key) for dict_key, subval in item.items()}\r\n File \"/bb/bin/airflow_env/lib/python3.6/site-packages/airflow/utils/log/secrets_masker.py\", line 193, in <dictcomp>\r\n return {dict_key: self.redact(subval, dict_key) for dict_key, subval in item.items()}\r\n File \"/bb/bin/airflow_env/lib/python3.6/site-packages/airflow/utils/log/secrets_masker.py\", line 189, in redact\r\n if name and should_hide_value_for_key(name):\r\n File \"/bb/bin/airflow_env/lib/python3.6/site-packages/airflow/utils/log/secrets_masker.py\", line 74, in should_hide_value_for_key\r\n name = name.strip().lower()\r\nAttributeError: 'int' object has no attribute 'strip'\r\n```\r\n\r\n**How to reproduce it**:\r\nDefine a dictionary where the type of keys is `int` and print it in any Airflow tasks.\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\"\"\"Mask sensitive information from logs\"\"\"\nimport collections\nimport logging\nimport re\nfrom typing import TYPE_CHECKING, Iterable, Optional, Set, TypeVar, Union\n\nfrom airflow.compat.functools import cache, cached_property\n\nif TYPE_CHECKING:\n from airflow.typing_compat import RePatternType\n\n RedactableItem = TypeVar('RedactableItem')\n\n\nlog = logging.getLogger(__name__)\n\n\nDEFAULT_SENSITIVE_FIELDS = frozenset(\n {\n 'access_token',\n 'api_key',\n 'apikey',\n 'authorization',\n 'passphrase',\n 'passwd',\n 'password',\n 'private_key',\n 'secret',\n }\n)\n\"\"\"Names of fields (Connection extra, Variable key name etc.) that are deemed sensitive\"\"\"\n\n\n@cache\ndef get_sensitive_variables_fields():\n \"\"\"Get comma-separated sensitive Variable Fields from airflow.cfg.\"\"\"\n from airflow.configuration import conf\n\n sensitive_fields = DEFAULT_SENSITIVE_FIELDS.copy()\n sensitive_variable_fields = conf.get('core', 'sensitive_var_conn_names')\n if sensitive_variable_fields:\n sensitive_fields |= frozenset({field.strip() for field in sensitive_variable_fields.split(',')})\n return sensitive_fields\n\n\ndef should_hide_value_for_key(name):\n \"\"\"Should the value for this given name (Variable name, or key in conn.extra_dejson) be hidden\"\"\"\n from airflow import settings\n\n if name and settings.HIDE_SENSITIVE_VAR_CONN_FIELDS:\n name = name.strip().lower()\n return any(s in name for s in get_sensitive_variables_fields())\n return False\n\n\ndef mask_secret(secret: Union[str, dict, Iterable], name: str = None) -> None:\n \"\"\"\n Mask a secret from appearing in the task logs.\n\n If ``name`` is provided, then it will only be masked if the name matches\n one of the configured \"sensitive\" names.\n\n If ``secret`` is a dict or a iterable (excluding str) then it will be\n recursively walked and keys with sensitive names will be hidden.\n \"\"\"\n # Delay import\n from airflow import settings\n\n # Filtering all log messages is not a free process, so we only do it when\n # running tasks\n if not settings.MASK_SECRETS_IN_LOGS or not secret:\n return\n\n _secrets_masker().add_mask(secret, name)\n\n\ndef redact(value: \"RedactableItem\", name: str = None) -> \"RedactableItem\":\n \"\"\"Redact any secrets found in ``value``.\"\"\"\n return _secrets_masker().redact(value, name)\n\n\n@cache\ndef _secrets_masker() -> \"SecretsMasker\":\n\n for flt in logging.getLogger('airflow.task').filters:\n if isinstance(flt, SecretsMasker):\n return flt\n raise RuntimeError(\"No SecretsMasker found!\")\n\n\nclass SecretsMasker(logging.Filter):\n \"\"\"Redact secrets from logs\"\"\"\n\n replacer: Optional[\"RePatternType\"] = None\n patterns: Set[str]\n\n ALREADY_FILTERED_FLAG = \"__SecretsMasker_filtered\"\n MAX_RECURSION_DEPTH = 5\n\n def __init__(self):\n super().__init__()\n self.patterns = set()\n\n @cached_property\n def _record_attrs_to_ignore(self) -> Iterable[str]:\n # Doing log.info(..., extra={'foo': 2}) sets extra properties on\n # record, i.e. record.foo. And we need to filter those too. 
Fun\n #\n # Create a record, and look at what attributes are on it, and ignore\n # all the default ones!\n\n record = logging.getLogRecordFactory()(\n # name, level, pathname, lineno, msg, args, exc_info, func=None, sinfo=None,\n \"x\",\n logging.INFO,\n __file__,\n 1,\n \"\",\n tuple(),\n exc_info=None,\n func=\"funcname\",\n )\n return frozenset(record.__dict__).difference({'msg', 'args'})\n\n def filter(self, record) -> bool:\n if self.ALREADY_FILTERED_FLAG in record.__dict__:\n # Filters are attached to multiple handlers and logs, keep a\n # \"private\" flag that stops us needing to process it more than once\n return True\n\n if self.replacer:\n for k, v in record.__dict__.items():\n if k in self._record_attrs_to_ignore:\n continue\n record.__dict__[k] = self.redact(v)\n if record.exc_info and record.exc_info[1] is not None:\n exc = record.exc_info[1]\n # I'm not sure if this is a good idea!\n exc.args = (self.redact(v) for v in exc.args)\n record.__dict__[self.ALREADY_FILTERED_FLAG] = True\n\n return True\n\n def _redact_all(self, item: \"RedactableItem\", depth: int) -> \"RedactableItem\":\n if depth > self.MAX_RECURSION_DEPTH or isinstance(item, str):\n return '***'\n if isinstance(item, dict):\n return {dict_key: self._redact_all(subval, depth + 1) for dict_key, subval in item.items()}\n elif isinstance(item, (tuple, set)):\n # Turn set in to tuple!\n return tuple(self._redact_all(subval, depth + 1) for subval in item)\n elif isinstance(item, list):\n return list(self._redact_all(subval, depth + 1) for subval in item)\n else:\n return item\n\n # pylint: disable=too-many-return-statements\n def _redact(self, item: \"RedactableItem\", name: Optional[str], depth: int) -> \"RedactableItem\":\n # Avoid spending too much effort on redacting on deeply nested\n # structures. This also avoid infinite recursion if a structure has\n # reference to self.\n if depth > self.MAX_RECURSION_DEPTH:\n return item\n try:\n if name and should_hide_value_for_key(name):\n return self._redact_all(item, depth)\n if isinstance(item, dict):\n return {\n dict_key: self._redact(subval, name=dict_key, depth=(depth + 1))\n for dict_key, subval in item.items()\n }\n elif isinstance(item, str):\n if self.replacer:\n # We can't replace specific values, but the key-based redacting\n # can still happen, so we can't short-circuit, we need to walk\n # the structure.\n return self.replacer.sub('***', item)\n return item\n elif isinstance(item, (tuple, set)):\n # Turn set in to tuple!\n return tuple(self._redact(subval, name=None, depth=(depth + 1)) for subval in item)\n elif isinstance(item, list):\n return [self._redact(subval, name=None, depth=(depth + 1)) for subval in item]\n else:\n return item\n # I think this should never happen, but it does not hurt to leave it just in case\n except Exception as e: # pylint: disable=broad-except\n log.warning(\n \"Unable to redact %r, please report this via <https://github.com/apache/airflow/issues>. 
\"\n \"Error was: %s: %s\",\n item,\n type(e).__name__,\n str(e),\n )\n return item\n\n def redact(self, item: \"RedactableItem\", name: Optional[str] = None) -> \"RedactableItem\":\n \"\"\"Redact an any secrets found in ``item``, if it is a string.\n\n If ``name`` is given, and it's a \"sensitive\" name (see\n :func:`should_hide_value_for_key`) then all string values in the item\n is redacted.\n \"\"\"\n return self._redact(item, name, depth=0)\n\n # pylint: enable=too-many-return-statements\n def add_mask(self, secret: Union[str, dict, Iterable], name: str = None):\n \"\"\"Add a new secret to be masked to this filter instance.\"\"\"\n if isinstance(secret, dict):\n for k, v in secret.items():\n self.add_mask(v, k)\n elif isinstance(secret, str):\n if not secret:\n return\n pattern = re.escape(secret)\n if pattern not in self.patterns and (not name or should_hide_value_for_key(name)):\n self.patterns.add(pattern)\n self.replacer = re.compile('|'.join(self.patterns))\n elif isinstance(secret, collections.abc.Iterable):\n for v in secret:\n self.add_mask(v, name)\n", "path": "airflow/utils/log/secrets_masker.py"}]} | 3,757 | 152 |
gh_patches_debug_13819 | rasdani/github-patches | git_diff | pypa__pipenv-5380 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"Could not find a version that satisfies the requirement" for package in private repository starting from pipenv==2022.8.31
### Issue description
With a Pipfile like this...
```
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[[source]]
url = "https://pypi.ourinternalrepo.com/simple"
verify_ssl = true
name = "our-pypi"
[packages]
..etc..
ourinternalpackage = {version = "==0.5.9", index = "our-pypi"}
```
that generates a Pipfile.lock like this...
```
{
"_meta": {
"hash": {
"sha256": "something"
},
"pipfile-spec": 6,
"requires": {
"python_version": "3.8"
},
"sources": [
{
"name": "pypi",
"url": "https://pypi.org/simple",
"verify_ssl": true
},
{
"name": "our-pypi",
"url": "https://pypi.ourinternalrepo.com/simple",
"verify_ssl": true
}
]
},
"default": {
"ourinternalpackage ": {
"hashes": [
"sha256:something"
],
"index": "our-pypi",
"version": "==0.1.1"
},
...etc
```
but when we execute `pipenv sync --verbose`, it generates a pip install command like so
```
/usr/local/lib/python3.8/site-packages/pipenv/patched/pip/__pip-runner__.py install -i https://pypi.org/simple --no-input --upgrade --no-deps --exists-action=i -r /tmp/pipenv-etc-requirements/pipenv-etc-hashed-reqs.txt
```
(note "-i https://pypi.org/simple")
and as might be expected, this fails with `6.552 ERROR: Could not find a version that satisfies the requirement ourinternalpackage==0.1.1 (from versions: none)`. This appears to have started happening with 2022.8.31--when we `pip install -U pipenv==2022.8.30` and run `pipenv sync` from there, the package resolves successfully.
### Expected result
I would expect the generated pip install command to use a `-i` pointed at the `our-pypi` source rather than the default.
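That is, roughly the same invocation but with the package's named index first (the URL comes from the Pipfile above; the remaining flags are copied from the observed command, and any additional sources would be expected as `--extra-index-url` entries):

```
/usr/local/lib/python3.8/site-packages/pipenv/patched/pip/__pip-runner__.py install -i https://pypi.ourinternalrepo.com/simple --no-input --upgrade --no-deps --exists-action=i -r /tmp/pipenv-etc-requirements/pipenv-etc-hashed-reqs.txt
```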
### Actual result
```
6.552 ERROR: Could not find a version that satisfies the requirement ourinternalpackage==0.1.1 (from versions: none)
```
There are also a number of errors like the following, which may or may not be relevant. I can't provide the full --verbose output because cleansing it of work-internal information would be too much hassle, but I have it around if anyone wants specific snippets.
```
An error occurred while installing alembic==1.8.0; python_version >= '3.7' --hash=sha256:a2d4d90da70b30e70352cd9455e35873a255a31402a438fe24815758d7a0e5e1 --hash=sha256:b5ae4bbfc7d1302ed413989d39474d102e7cfa158f6d5969d2497955ffe85a30! Will try again.
```
### Steps to replicate
See above
-------------------------------------------------------------------------------
Please run `$ pipenv --support`, and paste the results here. Don't put backticks (`` ` ``) around it! The output already contains Markdown formatting.
<details><summary>$ pipenv --support</summary>
Pipenv version: `'2022.9.8'`
Pipenv location: `'/usr/local/lib/python3.8/site-packages/pipenv'`
Python location: `'/usr/local/bin/python'`
OS Name: `'posix'`
User pip version: `'22.2.2'`
user Python installations found:
- `3.8.14`: `/usr/local/bin/python`
- `3.8.14`: `/usr/local/bin/python3`
- `3.8.14`: `/usr/local/bin/python3.8`
- `3.7.3`: `/usr/bin/python3`
- `3.7.3`: `/usr/bin/python3.7`
- `3.7.3`: `/usr/bin/python3.7m`
- `2.7.16`: `/usr/bin/python`
- `2.7.16`: `/usr/bin/python2.7`
- `2.7.16`: `/usr/bin/python2`
PEP 508 Information:
```
{'implementation_name': 'cpython',
'implementation_version': '3.8.14',
'os_name': 'posix',
'platform_machine': 'x86_64',
'platform_python_implementation': 'CPython',
'platform_release': '5.4.0-1051-gcp',
'platform_system': 'Linux',
'platform_version': '#55~18.04.1-Ubuntu SMP Sun Aug 1 20:38:04 UTC 2021',
'python_full_version': '3.8.14',
'python_version': '3.8',
'sys_platform': 'linux'}
```
System environment variables:
- `PIPENV_VENV_IN_PROJECT`
- `HOSTNAME`
- `PYTHON_VERSION`
- `PWD`
- `PYTHON_SETUPTOOLS_VERSION`
- `PIPENV_CACHE_DIR`
- `HOME`
- `LANG`
- `LS_COLORS`
- `GPG_KEY`
- `PYTHONPATH`
- `TERM`
- `SHLVL`
- `PYTHON_PIP_VERSION`
- `PYTHON_GET_PIP_SHA256`
- `PYTHON_GET_PIP_URL`
- `PATH`
- `_`
- `PIP_DISABLE_PIP_VERSION_CHECK`
- `PIP_PYTHON_PATH`
- `PYTHONDONTWRITEBYTECODE`
- `PYTHONFINDER_IGNORE_UNSUPPORTED`
Pipenv–specific environment variables:
- `PIPENV_VENV_IN_PROJECT`: `1`
- `PIPENV_CACHE_DIR`: `/tmp`
Debug–specific environment variables:
- `PATH`: `/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin`
- `LANG`: `C.UTF-8`
- `PWD`: `/code`
---------------------------
Contents of `Pipfile` ('/code/Pipfile'):
```toml
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[packages]
[dev-packages]
[requires]
python_version = "3.8"
```
Contents of `Pipfile.lock` ('/code/Pipfile.lock'):
(see above for selected snippets, full output not included for reasons stated above)
</issue>
<code>
[start of pipenv/utils/indexes.py]
1 from __future__ import annotations
2
3 import re
4 from collections.abc import Mapping
5
6 from pipenv.exceptions import PipenvUsageError
7 from pipenv.patched.pip._vendor.urllib3.util import parse_url
8 from pipenv.utils.constants import MYPY_RUNNING
9
10 from .internet import create_mirror_source, is_pypi_url
11
12 if MYPY_RUNNING:
13 from typing import List, Optional, Union # noqa
14
15 from pipenv.project import Project, TSource # noqa
16
17
18 def prepare_pip_source_args(sources, pip_args=None):
19 if pip_args is None:
20 pip_args = []
21 if sources:
22 # Add the source to pip.
23 package_url = sources[0].get("url")
24 if not package_url:
25 raise PipenvUsageError("[[source]] section does not contain a URL.")
26 pip_args.extend(["-i", package_url])
27 # Trust the host if it's not verified.
28 if not sources[0].get("verify_ssl", True):
29 url_parts = parse_url(package_url)
30 url_port = f":{url_parts.port}" if url_parts.port else ""
31 pip_args.extend(["--trusted-host", f"{url_parts.host}{url_port}"])
32 # Add additional sources as extra indexes.
33 if len(sources) > 1:
34 for source in sources[1:]:
35 url = source.get("url")
36 if not url: # not harmless, just don't continue
37 continue
38 pip_args.extend(["--extra-index-url", url])
39 # Trust the host if it's not verified.
40 if not source.get("verify_ssl", True):
41 url_parts = parse_url(url)
42 url_port = f":{url_parts.port}" if url_parts.port else ""
43 pip_args.extend(["--trusted-host", f"{url_parts.host}{url_port}"])
44 return pip_args
45
46
47 def get_project_index(
48 project: Project,
49 index: Optional[Union[str, TSource]] = None,
50 trusted_hosts: Optional[List[str]] = None,
51 ) -> TSource:
52 from pipenv.project import SourceNotFound
53
54 if trusted_hosts is None:
55 trusted_hosts = []
56 if isinstance(index, Mapping):
57 return project.find_source(index.get("url"))
58 try:
59 source = project.find_source(index)
60 except SourceNotFound:
61 index_url = parse_url(index)
62 src_name = project.src_name_from_url(index)
63 verify_ssl = index_url.host not in trusted_hosts
64 source = {"url": index, "verify_ssl": verify_ssl, "name": src_name}
65 return source
66
67
68 def get_source_list(
69 project: Project,
70 index: Optional[Union[str, TSource]] = None,
71 extra_indexes: Optional[Union[str, List[str]]] = None,
72 trusted_hosts: Optional[List[str]] = None,
73 pypi_mirror: Optional[str] = None,
74 ) -> List[TSource]:
75 sources: List[TSource] = []
76 if index:
77 sources.append(get_project_index(project, index))
78 if extra_indexes:
79 if isinstance(extra_indexes, str):
80 extra_indexes = [extra_indexes]
81
82 for source in extra_indexes:
83 extra_src = get_project_index(project, source)
84 if not sources or extra_src["url"] != sources[0]["url"]:
85 sources.append(extra_src)
86
87 for source in project.pipfile_sources:
88 if not sources or source["url"] != sources[0]["url"]:
89 sources.append(source)
90
91 if not sources:
92 sources = project.pipfile_sources[:]
93 if pypi_mirror:
94 sources = [
95 create_mirror_source(pypi_mirror) if is_pypi_url(source["url"]) else source
96 for source in sources
97 ]
98 return sources
99
100
101 def parse_indexes(line, strict=False):
102 from argparse import ArgumentParser
103
104 comment_re = re.compile(r"(?:^|\s+)#.*$")
105 line = comment_re.sub("", line)
106 parser = ArgumentParser("indexes", allow_abbrev=False)
107 parser.add_argument("-i", "--index-url", dest="index")
108 parser.add_argument("--extra-index-url", dest="extra_index")
109 parser.add_argument("--trusted-host", dest="trusted_host")
110 args, remainder = parser.parse_known_args(line.split())
111 index = args.index
112 extra_index = args.extra_index
113 trusted_host = args.trusted_host
114 if (
115 strict
116 and sum(bool(arg) for arg in (index, extra_index, trusted_host, remainder)) > 1
117 ):
118 raise ValueError("Index arguments must be on their own lines.")
119 return index, extra_index, trusted_host, remainder
120
[end of pipenv/utils/indexes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pipenv/utils/indexes.py b/pipenv/utils/indexes.py
--- a/pipenv/utils/indexes.py
+++ b/pipenv/utils/indexes.py
@@ -84,12 +84,12 @@
if not sources or extra_src["url"] != sources[0]["url"]:
sources.append(extra_src)
- for source in project.pipfile_sources:
+ for source in project.sources:
if not sources or source["url"] != sources[0]["url"]:
sources.append(source)
if not sources:
- sources = project.pipfile_sources[:]
+ sources = project.sources[:]
if pypi_mirror:
sources = [
create_mirror_source(pypi_mirror) if is_pypi_url(source["url"]) else source
| {"golden_diff": "diff --git a/pipenv/utils/indexes.py b/pipenv/utils/indexes.py\n--- a/pipenv/utils/indexes.py\n+++ b/pipenv/utils/indexes.py\n@@ -84,12 +84,12 @@\n if not sources or extra_src[\"url\"] != sources[0][\"url\"]:\n sources.append(extra_src)\n \n- for source in project.pipfile_sources:\n+ for source in project.sources:\n if not sources or source[\"url\"] != sources[0][\"url\"]:\n sources.append(source)\n \n if not sources:\n- sources = project.pipfile_sources[:]\n+ sources = project.sources[:]\n if pypi_mirror:\n sources = [\n create_mirror_source(pypi_mirror) if is_pypi_url(source[\"url\"]) else source\n", "issue": "\"Could not find a version that satisfies the requirement\" for package in private repository starting from pipenv==2022.8.31 \n### Issue description\r\n\r\nWith a Pipfile like this...\r\n```\r\n[[source]]\r\nurl = \"https://pypi.org/simple\"\r\nverify_ssl = true\r\nname = \"pypi\"\r\n\r\n[[source]]\r\nurl = \"https://pypi.ourinternalrepo.com/simple\"\r\nverify_ssl = true\r\nname = \"our-pypi\"\r\n\r\n[packages]\r\n..etc..\r\nourinternalpackage = {version = \"==0.5.9\", index = \"our-pypi\"}\r\n```\r\nthat generates a Pipfile.lock like this...\r\n```\r\n{\r\n \"_meta\": {\r\n \"hash\": {\r\n \"sha256\": \"something\"\r\n },\r\n \"pipfile-spec\": 6,\r\n \"requires\": {\r\n \"python_version\": \"3.8\"\r\n },\r\n \"sources\": [\r\n {\r\n \"name\": \"pypi\",\r\n \"url\": \"https://pypi.org/simple\",\r\n \"verify_ssl\": true\r\n },\r\n {\r\n \"name\": \"our-pypi\",\r\n \"url\": \"https://pypi.ourinternalrepo.com/simple\",\r\n \"verify_ssl\": true\r\n }\r\n ]\r\n },\r\n \"default\": {\r\n \"ourinternalpackage \": {\r\n \"hashes\": [\r\n \"sha256:something\"\r\n ],\r\n \"index\": \"our-pypi\",\r\n \"version\": \"==0.1.1\"\r\n },\r\n ...etc\r\n```\r\n\r\nbut when we execute `pipenv sync --verbose`, it generates a pip install command like so\r\n\r\n```\r\n/usr/local/lib/python3.8/site-packages/pipenv/patched/pip/__pip-runner__.py install -i https://pypi.org/simple --no-input --upgrade --no-deps --exists-action=i -r /tmp/pipenv-etc-requirements/pipenv-etc-hashed-reqs.txt\r\n```\r\n\r\n(note \"-i https://pypi.org/simple\")\r\n\r\nand as might be expected, this fails with `6.552 ERROR: Could not find a version that satisfies the requirement ourinternalpackage==0.1.1 (from versions: none)`. This appears to have started happening with 2022.8.31--when we `pip install -U pipenv==2022.8.30` and run `pipenv sync` from there, the package resolves successfully.\r\n\r\n### Expected result\r\n\r\nI would expect the package install to generate with a -i pointed at the `our-pypi` source rather than the default.\r\n\r\n### Actual result\r\n\r\n```\r\n6.552 ERROR: Could not find a version that satisfies the requirement ourinternalpackage==0.1.1 (from versions: none)\r\n```\r\n\r\nThere are also bunches of these, that may or may not be irrelevant. I can't provide the full --verbose output because cleansing it of work-internal information would be too much hassle, but I have it around if anyone wants specific snippets.\r\n\r\n```\r\nAn error occurred while installing alembic==1.8.0; python_version >= '3.7' --hash=sha256:a2d4d90da70b30e70352cd9455e35873a255a31402a438fe24815758d7a0e5e1 --hash=sha256:b5ae4bbfc7d1302ed413989d39474d102e7cfa158f6d5969d2497955ffe85a30! Will try again.\r\n```\r\n### Steps to replicate\r\n\r\nSee above\r\n\r\n-------------------------------------------------------------------------------\r\n\r\nPlease run `$ pipenv --support`, and paste the results here. 
Don't put backticks (`` ` ``) around it! The output already contains Markdown formatting.\r\n<details><summary>$ pipenv --support</summary>\r\n\r\nPipenv version: `'2022.9.8'`\r\n\r\nPipenv location: `'/usr/local/lib/python3.8/site-packages/pipenv'`\r\n\r\nPython location: `'/usr/local/bin/python'`\r\n\r\nOS Name: `'posix'`\r\n\r\nUser pip version: `'22.2.2'`\r\n\r\nuser Python installations found:\r\n\r\n - `3.8.14`: `/usr/local/bin/python`\r\n - `3.8.14`: `/usr/local/bin/python3`\r\n - `3.8.14`: `/usr/local/bin/python3.8`\r\n - `3.7.3`: `/usr/bin/python3`\r\n - `3.7.3`: `/usr/bin/python3.7`\r\n - `3.7.3`: `/usr/bin/python3.7m`\r\n - `2.7.16`: `/usr/bin/python`\r\n - `2.7.16`: `/usr/bin/python2.7`\r\n - `2.7.16`: `/usr/bin/python2`\r\n\r\nPEP 508 Information:\r\n\r\n```\r\n{'implementation_name': 'cpython',\r\n 'implementation_version': '3.8.14',\r\n 'os_name': 'posix',\r\n 'platform_machine': 'x86_64',\r\n 'platform_python_implementation': 'CPython',\r\n 'platform_release': '5.4.0-1051-gcp',\r\n 'platform_system': 'Linux',\r\n 'platform_version': '#55~18.04.1-Ubuntu SMP Sun Aug 1 20:38:04 UTC 2021',\r\n 'python_full_version': '3.8.14',\r\n 'python_version': '3.8',\r\n 'sys_platform': 'linux'}\r\n```\r\n\r\nSystem environment variables:\r\n\r\n - `PIPENV_VENV_IN_PROJECT`\r\n - `HOSTNAME`\r\n - `PYTHON_VERSION`\r\n - `PWD`\r\n - `PYTHON_SETUPTOOLS_VERSION`\r\n - `PIPENV_CACHE_DIR`\r\n - `HOME`\r\n - `LANG`\r\n - `LS_COLORS`\r\n - `GPG_KEY`\r\n - `PYTHONPATH`\r\n - `TERM`\r\n - `SHLVL`\r\n - `PYTHON_PIP_VERSION`\r\n - `PYTHON_GET_PIP_SHA256`\r\n - `PYTHON_GET_PIP_URL`\r\n - `PATH`\r\n - `_`\r\n - `PIP_DISABLE_PIP_VERSION_CHECK`\r\n - `PIP_PYTHON_PATH`\r\n - `PYTHONDONTWRITEBYTECODE`\r\n - `PYTHONFINDER_IGNORE_UNSUPPORTED`\r\n\r\nPipenv\u2013specific environment variables:\r\n\r\n - `PIPENV_VENV_IN_PROJECT`: `1`\r\n - `PIPENV_CACHE_DIR`: `/tmp`\r\n\r\nDebug\u2013specific environment variables:\r\n\r\n - `PATH`: `/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin`\r\n - `LANG`: `C.UTF-8`\r\n - `PWD`: `/code`\r\n\r\n\r\n---------------------------\r\n\r\nContents of `Pipfile` ('/code/Pipfile'):\r\n\r\n```toml\r\n[[source]]\r\nurl = \"https://pypi.org/simple\"\r\nverify_ssl = true\r\nname = \"pypi\"\r\n\r\n[packages]\r\n\r\n[dev-packages]\r\n\r\n[requires]\r\npython_version = \"3.8\"\r\n\r\n```\r\n\r\n\r\nContents of `Pipfile.lock` ('/code/Pipfile.lock'):\r\n\r\n(see above for selected snippets, full output not included for reasons stated above)\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom collections.abc import Mapping\n\nfrom pipenv.exceptions import PipenvUsageError\nfrom pipenv.patched.pip._vendor.urllib3.util import parse_url\nfrom pipenv.utils.constants import MYPY_RUNNING\n\nfrom .internet import create_mirror_source, is_pypi_url\n\nif MYPY_RUNNING:\n from typing import List, Optional, Union # noqa\n\n from pipenv.project import Project, TSource # noqa\n\n\ndef prepare_pip_source_args(sources, pip_args=None):\n if pip_args is None:\n pip_args = []\n if sources:\n # Add the source to pip.\n package_url = sources[0].get(\"url\")\n if not package_url:\n raise PipenvUsageError(\"[[source]] section does not contain a URL.\")\n pip_args.extend([\"-i\", package_url])\n # Trust the host if it's not verified.\n if not sources[0].get(\"verify_ssl\", True):\n url_parts = parse_url(package_url)\n url_port = f\":{url_parts.port}\" if url_parts.port else \"\"\n pip_args.extend([\"--trusted-host\", 
f\"{url_parts.host}{url_port}\"])\n # Add additional sources as extra indexes.\n if len(sources) > 1:\n for source in sources[1:]:\n url = source.get(\"url\")\n if not url: # not harmless, just don't continue\n continue\n pip_args.extend([\"--extra-index-url\", url])\n # Trust the host if it's not verified.\n if not source.get(\"verify_ssl\", True):\n url_parts = parse_url(url)\n url_port = f\":{url_parts.port}\" if url_parts.port else \"\"\n pip_args.extend([\"--trusted-host\", f\"{url_parts.host}{url_port}\"])\n return pip_args\n\n\ndef get_project_index(\n project: Project,\n index: Optional[Union[str, TSource]] = None,\n trusted_hosts: Optional[List[str]] = None,\n) -> TSource:\n from pipenv.project import SourceNotFound\n\n if trusted_hosts is None:\n trusted_hosts = []\n if isinstance(index, Mapping):\n return project.find_source(index.get(\"url\"))\n try:\n source = project.find_source(index)\n except SourceNotFound:\n index_url = parse_url(index)\n src_name = project.src_name_from_url(index)\n verify_ssl = index_url.host not in trusted_hosts\n source = {\"url\": index, \"verify_ssl\": verify_ssl, \"name\": src_name}\n return source\n\n\ndef get_source_list(\n project: Project,\n index: Optional[Union[str, TSource]] = None,\n extra_indexes: Optional[Union[str, List[str]]] = None,\n trusted_hosts: Optional[List[str]] = None,\n pypi_mirror: Optional[str] = None,\n) -> List[TSource]:\n sources: List[TSource] = []\n if index:\n sources.append(get_project_index(project, index))\n if extra_indexes:\n if isinstance(extra_indexes, str):\n extra_indexes = [extra_indexes]\n\n for source in extra_indexes:\n extra_src = get_project_index(project, source)\n if not sources or extra_src[\"url\"] != sources[0][\"url\"]:\n sources.append(extra_src)\n\n for source in project.pipfile_sources:\n if not sources or source[\"url\"] != sources[0][\"url\"]:\n sources.append(source)\n\n if not sources:\n sources = project.pipfile_sources[:]\n if pypi_mirror:\n sources = [\n create_mirror_source(pypi_mirror) if is_pypi_url(source[\"url\"]) else source\n for source in sources\n ]\n return sources\n\n\ndef parse_indexes(line, strict=False):\n from argparse import ArgumentParser\n\n comment_re = re.compile(r\"(?:^|\\s+)#.*$\")\n line = comment_re.sub(\"\", line)\n parser = ArgumentParser(\"indexes\", allow_abbrev=False)\n parser.add_argument(\"-i\", \"--index-url\", dest=\"index\")\n parser.add_argument(\"--extra-index-url\", dest=\"extra_index\")\n parser.add_argument(\"--trusted-host\", dest=\"trusted_host\")\n args, remainder = parser.parse_known_args(line.split())\n index = args.index\n extra_index = args.extra_index\n trusted_host = args.trusted_host\n if (\n strict\n and sum(bool(arg) for arg in (index, extra_index, trusted_host, remainder)) > 1\n ):\n raise ValueError(\"Index arguments must be on their own lines.\")\n return index, extra_index, trusted_host, remainder\n", "path": "pipenv/utils/indexes.py"}]} | 3,370 | 171 |
gh_patches_debug_19082 | rasdani/github-patches | git_diff | mkdocs__mkdocs-1322 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Version to 0.17.0
See discussion in #1166.
</issue>
<code>
[start of mkdocs/contrib/legacy_search/__init__.py]
1 # coding: utf-8
2
3 from __future__ import absolute_import, unicode_literals
4
5 import os
6 import logging
7 from mkdocs import utils
8 from mkdocs.plugins import BasePlugin
9 from mkdocs.contrib.legacy_search.search_index import SearchIndex
10
11
12 log = logging.getLogger(__name__)
13
14
15 class SearchPlugin(BasePlugin):
16 """ Add a search feature to MkDocs. """
17
18 def on_config(self, config, **kwargs):
19 "Add plugin templates and scripts to config."
20 path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
21 config['theme'].dirs.append(path)
22 config['theme'].static_templates.add('search.html')
23 config['extra_javascript'].append('search/require.js')
24 config['extra_javascript'].append('search/search.js')
25 return config
26
27 def on_pre_build(self, config, **kwargs):
28 "Create search index instance for later use."
29 self.search_index = SearchIndex()
30
31 def on_page_context(self, context, **kwargs):
32 "Add page to search index."
33 self.search_index.add_entry_from_context(context['page'])
34
35 def on_post_build(self, config, **kwargs):
36 "Build search index."
37 search_index = self.search_index.generate_search_index()
38 json_output_path = os.path.join(config['site_dir'], 'search', 'search_index.json')
39 utils.write_file(search_index.encode('utf-8'), json_output_path)
40
[end of mkdocs/contrib/legacy_search/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mkdocs/contrib/legacy_search/__init__.py b/mkdocs/contrib/legacy_search/__init__.py
--- a/mkdocs/contrib/legacy_search/__init__.py
+++ b/mkdocs/contrib/legacy_search/__init__.py
@@ -17,11 +17,13 @@
def on_config(self, config, **kwargs):
"Add plugin templates and scripts to config."
- path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
- config['theme'].dirs.append(path)
- config['theme'].static_templates.add('search.html')
- config['extra_javascript'].append('search/require.js')
- config['extra_javascript'].append('search/search.js')
+ if 'include_search_page' in config['theme'] and config['theme']['include_search_page']:
+ config['theme'].static_templates.add('search.html')
+ if not ('search_index_only' in config['theme'] and config['theme']['search_index_only']):
+ path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
+ config['theme'].dirs.append(path)
+ config['extra_javascript'].append('search/require.js')
+ config['extra_javascript'].append('search/search.js')
return config
def on_pre_build(self, config, **kwargs):
| {"golden_diff": "diff --git a/mkdocs/contrib/legacy_search/__init__.py b/mkdocs/contrib/legacy_search/__init__.py\n--- a/mkdocs/contrib/legacy_search/__init__.py\n+++ b/mkdocs/contrib/legacy_search/__init__.py\n@@ -17,11 +17,13 @@\n \n def on_config(self, config, **kwargs):\n \"Add plugin templates and scripts to config.\"\n- path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\n- config['theme'].dirs.append(path)\n- config['theme'].static_templates.add('search.html')\n- config['extra_javascript'].append('search/require.js')\n- config['extra_javascript'].append('search/search.js')\n+ if 'include_search_page' in config['theme'] and config['theme']['include_search_page']:\n+ config['theme'].static_templates.add('search.html')\n+ if not ('search_index_only' in config['theme'] and config['theme']['search_index_only']):\n+ path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\n+ config['theme'].dirs.append(path)\n+ config['extra_javascript'].append('search/require.js')\n+ config['extra_javascript'].append('search/search.js')\n return config\n \n def on_pre_build(self, config, **kwargs):\n", "issue": "Version to 0.17.0\nSee discussion in #1166.\n", "before_files": [{"content": "# coding: utf-8\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport os\nimport logging\nfrom mkdocs import utils\nfrom mkdocs.plugins import BasePlugin\nfrom mkdocs.contrib.legacy_search.search_index import SearchIndex\n\n\nlog = logging.getLogger(__name__)\n\n\nclass SearchPlugin(BasePlugin):\n \"\"\" Add a search feature to MkDocs. \"\"\"\n\n def on_config(self, config, **kwargs):\n \"Add plugin templates and scripts to config.\"\n path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')\n config['theme'].dirs.append(path)\n config['theme'].static_templates.add('search.html')\n config['extra_javascript'].append('search/require.js')\n config['extra_javascript'].append('search/search.js')\n return config\n\n def on_pre_build(self, config, **kwargs):\n \"Create search index instance for later use.\"\n self.search_index = SearchIndex()\n\n def on_page_context(self, context, **kwargs):\n \"Add page to search index.\"\n self.search_index.add_entry_from_context(context['page'])\n\n def on_post_build(self, config, **kwargs):\n \"Build search index.\"\n search_index = self.search_index.generate_search_index()\n json_output_path = os.path.join(config['site_dir'], 'search', 'search_index.json')\n utils.write_file(search_index.encode('utf-8'), json_output_path)\n", "path": "mkdocs/contrib/legacy_search/__init__.py"}]} | 942 | 301 |
gh_patches_debug_35568 | rasdani/github-patches | git_diff | rotki__rotki-839 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing token from balances, requires pagination for aleth.io
## Problem Definition
I'm missing a token from my balances. When I query the aleth.io API for my token balances, I get a paginated response back. Rotki only loads tokens from the first page (10 items).
</issue>
<code>
[start of rotkehlchen/externalapis/alethio.py]
1 import logging
2 from json.decoder import JSONDecodeError
3 from typing import Any, Dict, List, Optional, Union, overload
4
5 import gevent
6 import requests
7 from eth_utils.address import to_checksum_address
8 from typing_extensions import Literal
9
10 from rotkehlchen.assets.asset import EthereumToken
11 from rotkehlchen.db.dbhandler import DBHandler
12 from rotkehlchen.errors import RemoteError
13 from rotkehlchen.externalapis.interface import ExternalServiceWithApiKey
14 from rotkehlchen.fval import FVal
15 from rotkehlchen.logging import RotkehlchenLogsAdapter
16 from rotkehlchen.typing import ChecksumEthAddress, EthTokenInfo, ExternalService
17 from rotkehlchen.user_messages import MessagesAggregator
18 from rotkehlchen.utils.serialization import rlk_jsonloads_dict
19
20 logger = logging.getLogger(__name__)
21 log = RotkehlchenLogsAdapter(logger)
22
23
24 class Alethio(ExternalServiceWithApiKey):
25
26 def __init__(
27 self,
28 database: DBHandler,
29 msg_aggregator: MessagesAggregator,
30 all_eth_tokens: List[EthTokenInfo],
31 ) -> None:
32 super().__init__(database=database, service_name=ExternalService.ALETHIO)
33 self.msg_aggregator = msg_aggregator
34 self.session = requests.session()
35 self.all_tokens = all_eth_tokens
36 self.session.headers.update({'User-Agent': 'rotkehlchen'})
37
38 @overload # noqa: F811
39 def _query( # pylint: disable=no-self-use
40 self,
41 root_endpoint: Literal['accounts'],
42 path: str,
43 ) -> List[Dict[str, Any]]:
44 ...
45
46 @overload # noqa: F811
47 def _query( # pylint: disable=no-self-use
48 self,
49 root_endpoint: Literal['foo'],
50 path: str,
51 ) -> Dict[str, Any]:
52 ...
53
54 def _query(self, root_endpoint: str, path: str) -> Union[Dict[str, Any], List]: # noqa: F811
55 query_str = f'https://api.aleth.io/v1/{root_endpoint}/{path}'
56 log.debug(f'Querying alethio for {query_str}')
57
58 api_key = self._get_api_key()
59 if api_key:
60 self.session.headers.update({'Authorization': f'Bearer {api_key}'})
61
62 backoff = 1
63 backoff_limit = 13
64 while backoff < backoff_limit:
65 try:
66 response = self.session.get(query_str)
67 except requests.exceptions.ConnectionError as e:
68 if 'Max retries exceeded with url' in str(e):
69 log.debug(
70 f'Got max retries exceeded from alethio. Will '
71 f'backoff for {backoff} seconds.',
72 )
73 gevent.sleep(backoff)
74 backoff = backoff * 2
75 if backoff >= backoff_limit:
76 raise RemoteError(
77 'Getting alethio max connections error even '
78 'after we incrementally backed off',
79 )
80 continue
81
82 raise RemoteError(f'Alethio API request failed due to {str(e)}')
83
84 if response.status_code == 429:
85 log.debug(
86 f'Got response: {response.text} from alethio. Will '
87 f'backoff for {backoff} seconds.',
88 )
89 gevent.sleep(backoff)
90 backoff = backoff * 2
91 if backoff >= backoff_limit:
92 raise RemoteError(
93 'Alethio keeps returning rate limit errors even '
94 'after we incrementally backed off',
95 )
96 continue
97
98 if response.status_code != 200:
99 raise RemoteError(
100 f'Alethio API request {response.url} failed '
101 f'with HTTP status code {response.status_code} and text '
102 f'{response.text}',
103 )
104
105 try:
106 json_ret = rlk_jsonloads_dict(response.text)
107 except JSONDecodeError:
108 raise RemoteError(f'alethio returned invalid JSON response: {response.text}')
109
110 data = json_ret.get('data', None)
111 if data is None:
112 errors = json_ret.get('errors', None)
113 if errors is None:
114 msg = f'Unexpected alethio response: {response.text}'
115 else:
116 msg = str(errors)
117 raise RemoteError(f'alethio response error: {msg}')
118
119 # if we got here we should return
120 break
121
122 return data
123
124 def token_address_to_identifier(self, address: ChecksumEthAddress) -> Optional[EthTokenInfo]:
125 # TODO: Cache these stuff in a mapping
126 for token in self.all_tokens:
127 if token.address == address:
128 return token
129
130 return None
131
132 def get_token_balances(self, account: ChecksumEthAddress) -> Dict[EthereumToken, FVal]:
133 """Auto-detect which tokens are owned and get token balances for the account
134
135 The returned balance is already normalized for the token's decimals.
136
137 May raise:
138 - RemoteError if there is a problem contacting aleth.io
139 """
140 balances = {}
141 data = self._query(root_endpoint='accounts', path=f'{account}/tokenBalances')
142 for entry in data:
143 entry_type = entry.get('type', None)
144 if entry_type == 'TokenBalance':
145
146 attributes = entry.get('attributes', None)
147 balance = None
148 if attributes is not None:
149 balance = attributes.get('balance', None)
150 if balance is None:
151 continue
152
153 relationships = entry.get('relationships', None)
154 if relationships is None:
155 continue
156 token = relationships.get('token', None)
157 if token is None:
158 continue
159 if 'data' not in token:
160 continue
161 if 'id' not in token['data']:
162 continue
163
164 token_address = to_checksum_address(token['data']['id'])
165 token_info = self.token_address_to_identifier(token_address)
166 if token_info is None:
167 continue
168
169 amount = FVal(balance) / (FVal(10) ** FVal(token_info.decimal))
170 balances[EthereumToken(token_info.symbol)] = amount
171
172 return balances
173
[end of rotkehlchen/externalapis/alethio.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/rotkehlchen/externalapis/alethio.py b/rotkehlchen/externalapis/alethio.py
--- a/rotkehlchen/externalapis/alethio.py
+++ b/rotkehlchen/externalapis/alethio.py
@@ -40,6 +40,7 @@
self,
root_endpoint: Literal['accounts'],
path: str,
+ full_query_str: Optional[str] = None,
) -> List[Dict[str, Any]]:
...
@@ -48,11 +49,21 @@
self,
root_endpoint: Literal['foo'],
path: str,
+ full_query_str: Optional[str] = None,
) -> Dict[str, Any]:
...
- def _query(self, root_endpoint: str, path: str) -> Union[Dict[str, Any], List]: # noqa: F811
- query_str = f'https://api.aleth.io/v1/{root_endpoint}/{path}'
+ def _query( # noqa: F811
+ self,
+ root_endpoint: str,
+ path: str,
+ full_query_str: Optional[str] = None,
+ ) -> Union[Dict[str, Any], List]: # noqa: F811
+ if full_query_str:
+ # If this is a pagination call
+ query_str = full_query_str
+ else:
+ query_str = f'https://api.aleth.io/v1/{root_endpoint}/{path}?page[limit]=100'
log.debug(f'Querying alethio for {query_str}')
api_key = self._get_api_key()
@@ -116,6 +127,34 @@
msg = str(errors)
raise RemoteError(f'alethio response error: {msg}')
+ has_next = False
+ try:
+ has_next = json_ret['meta']['page']['hasNext']
+ except KeyError:
+ raise RemoteError(
+ f'Alethio response does not contain pagination information: {response.text}',
+ )
+
+ if has_next:
+ try:
+ link = json_ret['links']['next']
+ except KeyError:
+ raise RemoteError(
+ f'Alethio response does not contain next page link: {response.text}',
+ )
+
+ next_data = self._query( # type: ignore
+ root_endpoint=root_endpoint,
+ path=path,
+ full_query_str=link,
+ )
+ if root_endpoint == 'accounts':
+ data.extend(next_data)
+ else:
+ raise AssertionError(
+ 'Have not yet implemented alethio endpoints returning non lists',
+ )
+
# if we got here we should return
break
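A note on the fix above: it follows Alethio's JSON:API-style pagination, reading `meta.page.hasNext` and following `links.next` until the last page. A minimal sketch of that pattern with `requests` (hypothetical helper name and URL layout; the real plugin also performs rate-limit backoff, error handling and schema validation):

```python
import requests


def fetch_all_token_balances(base_url: str, account: str) -> list:
    """Follow JSON:API pagination (meta.page.hasNext / links.next) until exhausted."""
    session = requests.session()
    url = f"{base_url}/accounts/{account}/tokenBalances?page[limit]=100"
    data = []
    while url:
        payload = session.get(url).json()
        data.extend(payload.get("data", []))
        page = payload.get("meta", {}).get("page", {})
        # Only continue if the API says another page exists.
        url = payload.get("links", {}).get("next") if page.get("hasNext") else None
    return data
```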
| {"golden_diff": "diff --git a/rotkehlchen/externalapis/alethio.py b/rotkehlchen/externalapis/alethio.py\n--- a/rotkehlchen/externalapis/alethio.py\n+++ b/rotkehlchen/externalapis/alethio.py\n@@ -40,6 +40,7 @@\n self,\n root_endpoint: Literal['accounts'],\n path: str,\n+ full_query_str: Optional[str] = None,\n ) -> List[Dict[str, Any]]:\n ...\n \n@@ -48,11 +49,21 @@\n self,\n root_endpoint: Literal['foo'],\n path: str,\n+ full_query_str: Optional[str] = None,\n ) -> Dict[str, Any]:\n ...\n \n- def _query(self, root_endpoint: str, path: str) -> Union[Dict[str, Any], List]: # noqa: F811\n- query_str = f'https://api.aleth.io/v1/{root_endpoint}/{path}'\n+ def _query( # noqa: F811\n+ self,\n+ root_endpoint: str,\n+ path: str,\n+ full_query_str: Optional[str] = None,\n+ ) -> Union[Dict[str, Any], List]: # noqa: F811\n+ if full_query_str:\n+ # If this is a pagination call\n+ query_str = full_query_str\n+ else:\n+ query_str = f'https://api.aleth.io/v1/{root_endpoint}/{path}?page[limit]=100'\n log.debug(f'Querying alethio for {query_str}')\n \n api_key = self._get_api_key()\n@@ -116,6 +127,34 @@\n msg = str(errors)\n raise RemoteError(f'alethio response error: {msg}')\n \n+ has_next = False\n+ try:\n+ has_next = json_ret['meta']['page']['hasNext']\n+ except KeyError:\n+ raise RemoteError(\n+ f'Alethio response does not contain pagination information: {response.text}',\n+ )\n+\n+ if has_next:\n+ try:\n+ link = json_ret['links']['next']\n+ except KeyError:\n+ raise RemoteError(\n+ f'Alethio response does not contain next page link: {response.text}',\n+ )\n+\n+ next_data = self._query( # type: ignore\n+ root_endpoint=root_endpoint,\n+ path=path,\n+ full_query_str=link,\n+ )\n+ if root_endpoint == 'accounts':\n+ data.extend(next_data)\n+ else:\n+ raise AssertionError(\n+ 'Have not yet implemented alethio endpoints returning non lists',\n+ )\n+\n # if we got here we should return\n break\n", "issue": "Missing token from balances, requires pagination for aleth.io\n## Problem Definition\r\n\r\nI'm missing a token from my balances. When I query aleth.io API for my token balances I get a paginated response back. 
Rotki does only load tokens from the first page (10 items).\n", "before_files": [{"content": "import logging\nfrom json.decoder import JSONDecodeError\nfrom typing import Any, Dict, List, Optional, Union, overload\n\nimport gevent\nimport requests\nfrom eth_utils.address import to_checksum_address\nfrom typing_extensions import Literal\n\nfrom rotkehlchen.assets.asset import EthereumToken\nfrom rotkehlchen.db.dbhandler import DBHandler\nfrom rotkehlchen.errors import RemoteError\nfrom rotkehlchen.externalapis.interface import ExternalServiceWithApiKey\nfrom rotkehlchen.fval import FVal\nfrom rotkehlchen.logging import RotkehlchenLogsAdapter\nfrom rotkehlchen.typing import ChecksumEthAddress, EthTokenInfo, ExternalService\nfrom rotkehlchen.user_messages import MessagesAggregator\nfrom rotkehlchen.utils.serialization import rlk_jsonloads_dict\n\nlogger = logging.getLogger(__name__)\nlog = RotkehlchenLogsAdapter(logger)\n\n\nclass Alethio(ExternalServiceWithApiKey):\n\n def __init__(\n self,\n database: DBHandler,\n msg_aggregator: MessagesAggregator,\n all_eth_tokens: List[EthTokenInfo],\n ) -> None:\n super().__init__(database=database, service_name=ExternalService.ALETHIO)\n self.msg_aggregator = msg_aggregator\n self.session = requests.session()\n self.all_tokens = all_eth_tokens\n self.session.headers.update({'User-Agent': 'rotkehlchen'})\n\n @overload # noqa: F811\n def _query( # pylint: disable=no-self-use\n self,\n root_endpoint: Literal['accounts'],\n path: str,\n ) -> List[Dict[str, Any]]:\n ...\n\n @overload # noqa: F811\n def _query( # pylint: disable=no-self-use\n self,\n root_endpoint: Literal['foo'],\n path: str,\n ) -> Dict[str, Any]:\n ...\n\n def _query(self, root_endpoint: str, path: str) -> Union[Dict[str, Any], List]: # noqa: F811\n query_str = f'https://api.aleth.io/v1/{root_endpoint}/{path}'\n log.debug(f'Querying alethio for {query_str}')\n\n api_key = self._get_api_key()\n if api_key:\n self.session.headers.update({'Authorization': f'Bearer {api_key}'})\n\n backoff = 1\n backoff_limit = 13\n while backoff < backoff_limit:\n try:\n response = self.session.get(query_str)\n except requests.exceptions.ConnectionError as e:\n if 'Max retries exceeded with url' in str(e):\n log.debug(\n f'Got max retries exceeded from alethio. Will '\n f'backoff for {backoff} seconds.',\n )\n gevent.sleep(backoff)\n backoff = backoff * 2\n if backoff >= backoff_limit:\n raise RemoteError(\n 'Getting alethio max connections error even '\n 'after we incrementally backed off',\n )\n continue\n\n raise RemoteError(f'Alethio API request failed due to {str(e)}')\n\n if response.status_code == 429:\n log.debug(\n f'Got response: {response.text} from alethio. 
Will '\n f'backoff for {backoff} seconds.',\n )\n gevent.sleep(backoff)\n backoff = backoff * 2\n if backoff >= backoff_limit:\n raise RemoteError(\n 'Alethio keeps returning rate limit errors even '\n 'after we incrementally backed off',\n )\n continue\n\n if response.status_code != 200:\n raise RemoteError(\n f'Alethio API request {response.url} failed '\n f'with HTTP status code {response.status_code} and text '\n f'{response.text}',\n )\n\n try:\n json_ret = rlk_jsonloads_dict(response.text)\n except JSONDecodeError:\n raise RemoteError(f'alethio returned invalid JSON response: {response.text}')\n\n data = json_ret.get('data', None)\n if data is None:\n errors = json_ret.get('errors', None)\n if errors is None:\n msg = f'Unexpected alethio response: {response.text}'\n else:\n msg = str(errors)\n raise RemoteError(f'alethio response error: {msg}')\n\n # if we got here we should return\n break\n\n return data\n\n def token_address_to_identifier(self, address: ChecksumEthAddress) -> Optional[EthTokenInfo]:\n # TODO: Cache these stuff in a mapping\n for token in self.all_tokens:\n if token.address == address:\n return token\n\n return None\n\n def get_token_balances(self, account: ChecksumEthAddress) -> Dict[EthereumToken, FVal]:\n \"\"\"Auto-detect which tokens are owned and get token balances for the account\n\n The returned balance is already normalized for the token's decimals.\n\n May raise:\n - RemoteError if there is a problem contacting aleth.io\n \"\"\"\n balances = {}\n data = self._query(root_endpoint='accounts', path=f'{account}/tokenBalances')\n for entry in data:\n entry_type = entry.get('type', None)\n if entry_type == 'TokenBalance':\n\n attributes = entry.get('attributes', None)\n balance = None\n if attributes is not None:\n balance = attributes.get('balance', None)\n if balance is None:\n continue\n\n relationships = entry.get('relationships', None)\n if relationships is None:\n continue\n token = relationships.get('token', None)\n if token is None:\n continue\n if 'data' not in token:\n continue\n if 'id' not in token['data']:\n continue\n\n token_address = to_checksum_address(token['data']['id'])\n token_info = self.token_address_to_identifier(token_address)\n if token_info is None:\n continue\n\n amount = FVal(balance) / (FVal(10) ** FVal(token_info.decimal))\n balances[EthereumToken(token_info.symbol)] = amount\n\n return balances\n", "path": "rotkehlchen/externalapis/alethio.py"}]} | 2,359 | 623 |
gh_patches_debug_79 | rasdani/github-patches | git_diff | flairNLP__flair-447 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
__version__ attribute?
I'm always frustrated when flair doesn't have a __version__ attribute... :-)
Please add a __version__ attribute to the module.
Thank you!
DC
</issue>
<code>
[start of flair/__init__.py]
1 import torch
2
3 from . import data
4 from . import models
5 from . import visual
6 from . import trainers
7
8 import logging.config
9
10
11 logging.config.dictConfig({
12 'version': 1,
13 'disable_existing_loggers': False,
14 'formatters': {
15 'standard': {
16 'format': '%(asctime)-15s %(message)s'
17 },
18 },
19 'handlers': {
20 'console': {
21 'level': 'INFO',
22 'class': 'logging.StreamHandler',
23 'formatter': 'standard',
24 'stream': 'ext://sys.stdout'
25 },
26 },
27 'loggers': {
28 'flair': {
29 'handlers': ['console'],
30 'level': 'INFO',
31 'propagate': False
32 }
33 },
34 'root': {
35 'handlers': ['console'],
36 'level': 'WARNING'
37 }
38 })
39
40 logger = logging.getLogger('flair')
41
42
43 device = None
44 if torch.cuda.is_available():
45 device = torch.device('cuda:0')
46 else:
47 device = torch.device('cpu')
48
[end of flair/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flair/__init__.py b/flair/__init__.py
--- a/flair/__init__.py
+++ b/flair/__init__.py
@@ -7,6 +7,7 @@
import logging.config
+__version__ = "0.4.1"
logging.config.dictConfig({
'version': 1,
| {"golden_diff": "diff --git a/flair/__init__.py b/flair/__init__.py\n--- a/flair/__init__.py\n+++ b/flair/__init__.py\n@@ -7,6 +7,7 @@\n \n import logging.config\n \n+__version__ = \"0.4.1\"\n \n logging.config.dictConfig({\n 'version': 1,\n", "issue": "__version__ attribute?\nI'm always frustrated when flair doesn't have a __version__attribute... :-)\r\n\r\nPlease, add a __version__attribute to the module.\r\n\r\nThank you!\r\nDC\r\n\n", "before_files": [{"content": "import torch\n\nfrom . import data\nfrom . import models\nfrom . import visual\nfrom . import trainers\n\nimport logging.config\n\n\nlogging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)-15s %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard',\n 'stream': 'ext://sys.stdout'\n },\n },\n 'loggers': {\n 'flair': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': False\n }\n },\n 'root': {\n 'handlers': ['console'],\n 'level': 'WARNING'\n }\n})\n\nlogger = logging.getLogger('flair')\n\n\ndevice = None\nif torch.cuda.is_available():\n device = torch.device('cuda:0')\nelse:\n device = torch.device('cpu')\n", "path": "flair/__init__.py"}]} | 891 | 76 |
gh_patches_debug_29935 | rasdani/github-patches | git_diff | google__jax-2481 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add multivariate normal pdf evaluation to jax.scipy
It would be great to have a multivariate Gaussian pdf/logpdf implementation, similar to the univariate version in [jax.scipy.stats.norm](https://jax.readthedocs.io/en/latest/_modules/jax/scipy/stats/norm.html#logpdf). I am currently working with this hacky function:
```
@jit
def multi_gauss_logpdf(x, mean, cov):
""" Calculate the probability density of a
sample from the multivariate normal. """
D = mean.shape[0]
(sign, logdet) = np.linalg.slogdet(cov)
p1 = D*np.log(2*np.pi) + logdet
p2 = (x-mean).T @ np.linalg.inv(cov) @ (x-mean)
return -1./2 * (p1 + p2)
batch_logpdf = vmap(multi_gauss_logpdf, in_axes=(0, None, None))
```
My `lax`/primitive knowledge is still fairly limited, but I will try to put together a PR. Any recommendations on how to speed things up?
</issue>
<code>
[start of jax/scipy/stats/multivariate_normal.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 import numpy as np
17 import scipy.stats as osp_stats
18
19 from ... import lax
20 from ...numpy.lax_numpy import _promote_dtypes_inexact, _constant_like, _wraps
21 from ...numpy.lax_numpy import dot, subtract, einsum
22 from ...numpy.linalg import det, inv
23
24
25 @_wraps(osp_stats.multivariate_normal.logpdf, update_doc=False)
26 def logpdf(x, mean, cov):
27 x, mean, cov = _promote_dtypes_inexact(x, mean, cov)
28 two = _constant_like(x, 2)
29 dim = _constant_like(x, mean.shape[0])
30 det_sig = det(cov).astype(cov.dtype)
31 log_normalizer = lax.log(lax.mul(lax.pow(_constant_like(x, 2 * np.pi), dim),
32 det_sig))
33 x_shape = x.shape[:-1]
34 if x_shape:
35 x_2d = x.reshape((-1, mean.shape[0]))
36 quadratic = einsum("ij,jk,ik->i", subtract(x_2d, mean), inv(cov),
37 subtract(x_2d, mean)).reshape(x_shape).astype(cov.dtype)
38 else:
39 quadratic = dot(dot(subtract(x, mean), inv(cov)), subtract(x, mean).T).astype(cov.dtype)
40 return lax.div(lax.neg(lax.add(log_normalizer, quadratic)), two)
41
42 @_wraps(osp_stats.multivariate_normal.pdf, update_doc=False)
43 def pdf(x, mean, cov):
44 return lax.exp(logpdf(x, mean, cov))
45
[end of jax/scipy/stats/multivariate_normal.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/jax/scipy/stats/multivariate_normal.py b/jax/scipy/stats/multivariate_normal.py
--- a/jax/scipy/stats/multivariate_normal.py
+++ b/jax/scipy/stats/multivariate_normal.py
@@ -17,27 +17,29 @@
import scipy.stats as osp_stats
from ... import lax
+from ...lax_linalg import cholesky, triangular_solve
+from ... import numpy as jnp
from ...numpy.lax_numpy import _promote_dtypes_inexact, _constant_like, _wraps
-from ...numpy.lax_numpy import dot, subtract, einsum
-from ...numpy.linalg import det, inv
@_wraps(osp_stats.multivariate_normal.logpdf, update_doc=False)
def logpdf(x, mean, cov):
x, mean, cov = _promote_dtypes_inexact(x, mean, cov)
- two = _constant_like(x, 2)
- dim = _constant_like(x, mean.shape[0])
- det_sig = det(cov).astype(cov.dtype)
- log_normalizer = lax.log(lax.mul(lax.pow(_constant_like(x, 2 * np.pi), dim),
- det_sig))
- x_shape = x.shape[:-1]
- if x_shape:
- x_2d = x.reshape((-1, mean.shape[0]))
- quadratic = einsum("ij,jk,ik->i", subtract(x_2d, mean), inv(cov),
- subtract(x_2d, mean)).reshape(x_shape).astype(cov.dtype)
+ if not mean.shape:
+ return -1/2 * (x - mean) ** 2 / cov - 1/2 * (np.log(2*np.pi) + jnp.log(cov))
else:
- quadratic = dot(dot(subtract(x, mean), inv(cov)), subtract(x, mean).T).astype(cov.dtype)
- return lax.div(lax.neg(lax.add(log_normalizer, quadratic)), two)
+ n = mean.shape[-1]
+ if not np.shape(cov):
+ y = x - mean
+ return (-1/2 * jnp.einsum('...i,...i->...', y, y) / cov
+ - n/2 * (np.log(2*np.pi) + jnp.log(cov)))
+ else:
+ if cov.ndim < 2 or cov.shape[-2:] != (n, n):
+ raise ValueError("multivariate_normal.logpdf got incompatible shapes")
+ L = cholesky(cov)
+ y = triangular_solve(L, x - mean, lower=True, transpose_a=True)
+ return (-1/2 * jnp.einsum('...i,...i->...', y, y) - n/2*np.log(2*np.pi)
+ - jnp.log(L.diagonal()).sum())
@_wraps(osp_stats.multivariate_normal.pdf, update_doc=False)
def pdf(x, mean, cov):
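For readers comparing this patch with the "hacky" snippet quoted in the issue: the main change is replacing the explicit `det`/`inv` computation with a Cholesky factorization, which is cheaper and numerically more stable. A rough standalone sketch of the same idea (hypothetical function name; assumes a single sample and a dense, positive-definite covariance):

```python
import jax.numpy as jnp
from jax.scipy.linalg import solve_triangular


def mvn_logpdf_cholesky(x, mean, cov):
    # Factor cov = L @ L.T with L lower-triangular.
    L = jnp.linalg.cholesky(cov)
    # Solving L y = (x - mean) whitens the residual without forming inv(cov).
    y = solve_triangular(L, x - mean, lower=True)
    n = mean.shape[-1]
    # log det(cov) = 2 * sum(log(diag(L)))
    log_det = 2.0 * jnp.sum(jnp.log(jnp.diag(L)))
    return -0.5 * (y @ y + n * jnp.log(2.0 * jnp.pi) + log_det)
```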
| {"golden_diff": "diff --git a/jax/scipy/stats/multivariate_normal.py b/jax/scipy/stats/multivariate_normal.py\n--- a/jax/scipy/stats/multivariate_normal.py\n+++ b/jax/scipy/stats/multivariate_normal.py\n@@ -17,27 +17,29 @@\n import scipy.stats as osp_stats\n \n from ... import lax\n+from ...lax_linalg import cholesky, triangular_solve\n+from ... import numpy as jnp\n from ...numpy.lax_numpy import _promote_dtypes_inexact, _constant_like, _wraps\n-from ...numpy.lax_numpy import dot, subtract, einsum\n-from ...numpy.linalg import det, inv\n \n \n @_wraps(osp_stats.multivariate_normal.logpdf, update_doc=False)\n def logpdf(x, mean, cov):\n x, mean, cov = _promote_dtypes_inexact(x, mean, cov)\n- two = _constant_like(x, 2)\n- dim = _constant_like(x, mean.shape[0])\n- det_sig = det(cov).astype(cov.dtype)\n- log_normalizer = lax.log(lax.mul(lax.pow(_constant_like(x, 2 * np.pi), dim),\n- det_sig))\n- x_shape = x.shape[:-1]\n- if x_shape:\n- x_2d = x.reshape((-1, mean.shape[0]))\n- quadratic = einsum(\"ij,jk,ik->i\", subtract(x_2d, mean), inv(cov), \n- subtract(x_2d, mean)).reshape(x_shape).astype(cov.dtype)\n+ if not mean.shape:\n+ return -1/2 * (x - mean) ** 2 / cov - 1/2 * (np.log(2*np.pi) + jnp.log(cov))\n else:\n- quadratic = dot(dot(subtract(x, mean), inv(cov)), subtract(x, mean).T).astype(cov.dtype)\n- return lax.div(lax.neg(lax.add(log_normalizer, quadratic)), two)\n+ n = mean.shape[-1]\n+ if not np.shape(cov):\n+ y = x - mean\n+ return (-1/2 * jnp.einsum('...i,...i->...', y, y) / cov\n+ - n/2 * (np.log(2*np.pi) + jnp.log(cov)))\n+ else:\n+ if cov.ndim < 2 or cov.shape[-2:] != (n, n):\n+ raise ValueError(\"multivariate_normal.logpdf got incompatible shapes\")\n+ L = cholesky(cov)\n+ y = triangular_solve(L, x - mean, lower=True, transpose_a=True)\n+ return (-1/2 * jnp.einsum('...i,...i->...', y, y) - n/2*np.log(2*np.pi)\n+ - jnp.log(L.diagonal()).sum())\n \n @_wraps(osp_stats.multivariate_normal.pdf, update_doc=False)\n def pdf(x, mean, cov):\n", "issue": "Add multivariate normal pdf evalutation to jax.scipy\nIt would be great to have a Multivariate gaussian pdf/logpdf implementation, similar to the univariate version in [jax.scipy.stats.norm](https://jax.readthedocs.io/en/latest/_modules/jax/scipy/stats/norm.html#logpdf). I am currently working with this hacky function: \r\n\r\n```\r\n@jit\r\ndef multi_gauss_logpdf(x, mean, cov):\r\n \"\"\" Calculate the probability density of a\r\n sample from the multivariate normal. \"\"\"\r\n D = mean.shape[0]\r\n (sign, logdet) = np.linalg.slogdet(cov)\r\n p1 = D*np.log(2*np.pi) + logdet\r\n p2 = (x-mean).T @ np.linalg.inv(cov) @ (x-mean)\r\n return -1./2 * (p1 + p2)\r\n\r\nbatch_logpdf = vmap(multi_gauss_logpdf, in_axes=(0, None, None))\r\n```\r\n\r\nMy `lax`/primitive knowledge is still fairly limited but I will try to put together a pr. 
Any recommendations how to speed things up?\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport numpy as np\nimport scipy.stats as osp_stats\n\nfrom ... import lax\nfrom ...numpy.lax_numpy import _promote_dtypes_inexact, _constant_like, _wraps\nfrom ...numpy.lax_numpy import dot, subtract, einsum\nfrom ...numpy.linalg import det, inv\n\n\n@_wraps(osp_stats.multivariate_normal.logpdf, update_doc=False)\ndef logpdf(x, mean, cov):\n x, mean, cov = _promote_dtypes_inexact(x, mean, cov)\n two = _constant_like(x, 2)\n dim = _constant_like(x, mean.shape[0])\n det_sig = det(cov).astype(cov.dtype)\n log_normalizer = lax.log(lax.mul(lax.pow(_constant_like(x, 2 * np.pi), dim),\n det_sig))\n x_shape = x.shape[:-1]\n if x_shape:\n x_2d = x.reshape((-1, mean.shape[0]))\n quadratic = einsum(\"ij,jk,ik->i\", subtract(x_2d, mean), inv(cov), \n subtract(x_2d, mean)).reshape(x_shape).astype(cov.dtype)\n else:\n quadratic = dot(dot(subtract(x, mean), inv(cov)), subtract(x, mean).T).astype(cov.dtype)\n return lax.div(lax.neg(lax.add(log_normalizer, quadratic)), two)\n\n@_wraps(osp_stats.multivariate_normal.pdf, update_doc=False)\ndef pdf(x, mean, cov):\n return lax.exp(logpdf(x, mean, cov))\n", "path": "jax/scipy/stats/multivariate_normal.py"}]} | 1,335 | 655 |
gh_patches_debug_17452 | rasdani/github-patches | git_diff | streamlink__streamlink-5908 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.vkplay: vkplay.live has moved to another domain (live.vkplay.ru)
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
streamlink 6.7.2
### Description
https://vk.com/wall-212496568_91026
Yesterday, vkplay live changed its domain. If you specify the old domain in the link to the stream, everything still works, but the site itself now links to the new domain, which the existing plugin does not recognize.
I just tried to change the updated part of the domain in the plugin code (vkplay.live -> live.vkplay.ru), and everything seems to be working well. It's a bit difficult for me to create a pull request, but here's the corrected plugin as a Gist:
https://gist.github.com/oexlkinq/eef0a260dddad473c5febafd91b980d9
The old domain is also listed in the documentation (https://streamlink.github.io/plugins.html#vkplay).
### Debug log
```text
streamlink https://live.vkplay.ru/ruwarface 720p --loglevel=debug
[cli][debug] OS: Linux-6.8.1-arch1-1-x86_64-with-glibc2.39
[cli][debug] Python: 3.11.8
[cli][debug] OpenSSL: OpenSSL 3.2.1 30 Jan 2024
[cli][debug] Streamlink: 6.7.2
[cli][debug] Dependencies:
[cli][debug] certifi: 2024.2.2
[cli][debug] exceptiongroup: 1.2.0
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 5.1.0
[cli][debug] pycountry: 23.12.11
[cli][debug] pycryptodome: 3.20.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.31.0
[cli][debug] trio: 0.25.0
[cli][debug] trio-websocket: 0.11.1
[cli][debug] typing-extensions: 4.10.0
[cli][debug] urllib3: 1.26.18
[cli][debug] websocket-client: 1.7.0
[cli][debug] Arguments:
[cli][debug] url=https://live.vkplay.ru/ruwarface
[cli][debug] stream=['720p']
[cli][debug] --loglevel=debug
error: No plugin can handle URL: https://live.vkplay.ru/ruwarface
```
</issue>
<code>
[start of src/streamlink/plugins/vkplay.py]
1 """
2 $description Russian live-streaming platform for gaming and esports, owned by VKontakte.
3 $url vkplay.live
4 $type live
5 $metadata id
6 $metadata author
7 $metadata category
8 $metadata title
9 """
10
11 import logging
12 import re
13
14 from streamlink.plugin import Plugin, pluginmatcher
15 from streamlink.plugin.api import validate
16 from streamlink.stream.hls import HLSStream
17
18
19 log = logging.getLogger(__name__)
20
21
22 @pluginmatcher(re.compile(
23 r"https?://vkplay\.live/(?P<channel_name>\w+)/?$",
24 ))
25 class VKplay(Plugin):
26 API_URL = "https://api.vkplay.live/v1"
27
28 def _get_streams(self):
29 self.author = self.match.group("channel_name")
30 log.debug(f"Channel name: {self.author}")
31
32 data = self.session.http.get(
33 f"{self.API_URL}/blog/{self.author}/public_video_stream",
34 headers={"Referer": self.url},
35 acceptable_status=(200, 404),
36 schema=validate.Schema(
37 validate.parse_json(),
38 validate.any(
39 validate.all(
40 {"error": str, "error_description": str},
41 validate.get("error_description"),
42 ),
43 validate.all(
44 {
45 validate.optional("category"): validate.all(
46 {
47 "title": str,
48 },
49 validate.get("title"),
50 ),
51 "title": str,
52 "data": validate.any(
53 [
54 validate.all(
55 {
56 "vid": str,
57 "playerUrls": [
58 validate.all(
59 {
60 "type": str,
61 "url": validate.any("", validate.url()),
62 },
63 validate.union_get("type", "url"),
64 ),
65 ],
66 },
67 validate.union_get("vid", "playerUrls"),
68 ),
69 ],
70 [],
71 ),
72 },
73 validate.union_get(
74 "category",
75 "title",
76 ("data", 0),
77 ),
78 ),
79 ),
80 ),
81 )
82 if isinstance(data, str):
83 log.error(data)
84 return
85
86 self.category, self.title, streamdata = data
87 if not streamdata:
88 return
89
90 self.id, streams = streamdata
91
92 for streamtype, streamurl in streams:
93 if streamurl and streamtype == "live_hls":
94 return HLSStream.parse_variant_playlist(self.session, streamurl)
95
96
97 __plugin__ = VKplay
98
[end of src/streamlink/plugins/vkplay.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/vkplay.py b/src/streamlink/plugins/vkplay.py
--- a/src/streamlink/plugins/vkplay.py
+++ b/src/streamlink/plugins/vkplay.py
@@ -1,6 +1,6 @@
"""
$description Russian live-streaming platform for gaming and esports, owned by VKontakte.
-$url vkplay.live
+$url live.vkplay.ru
$type live
$metadata id
$metadata author
@@ -20,13 +20,13 @@
@pluginmatcher(re.compile(
- r"https?://vkplay\.live/(?P<channel_name>\w+)/?$",
+ r"https?://(?:live\.vkplay\.ru|vkplay\.live)/(?P<channel_name>\w+)/?$",
))
class VKplay(Plugin):
- API_URL = "https://api.vkplay.live/v1"
+ API_URL = "https://api.live.vkplay.ru/v1"
def _get_streams(self):
- self.author = self.match.group("channel_name")
+ self.author = self.match["channel_name"]
log.debug(f"Channel name: {self.author}")
data = self.session.http.get(
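As a quick illustration of the updated matcher (pattern copied from the patch above; the check below is only a sketch, not part of Streamlink's test suite), both the old and the new domain should resolve to the same channel name:

```python
import re

_url_re = re.compile(r"https?://(?:live\.vkplay\.ru|vkplay\.live)/(?P<channel_name>\w+)/?$")

for url in ("https://live.vkplay.ru/ruwarface", "https://vkplay.live/ruwarface"):
    match = _url_re.match(url)
    assert match is not None and match["channel_name"] == "ruwarface"
```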
| {"golden_diff": "diff --git a/src/streamlink/plugins/vkplay.py b/src/streamlink/plugins/vkplay.py\n--- a/src/streamlink/plugins/vkplay.py\n+++ b/src/streamlink/plugins/vkplay.py\n@@ -1,6 +1,6 @@\n \"\"\"\n $description Russian live-streaming platform for gaming and esports, owned by VKontakte.\n-$url vkplay.live\n+$url live.vkplay.ru\n $type live\n $metadata id\n $metadata author\n@@ -20,13 +20,13 @@\n \n \n @pluginmatcher(re.compile(\n- r\"https?://vkplay\\.live/(?P<channel_name>\\w+)/?$\",\n+ r\"https?://(?:live\\.vkplay\\.ru|vkplay\\.live)/(?P<channel_name>\\w+)/?$\",\n ))\n class VKplay(Plugin):\n- API_URL = \"https://api.vkplay.live/v1\"\n+ API_URL = \"https://api.live.vkplay.ru/v1\"\n \n def _get_streams(self):\n- self.author = self.match.group(\"channel_name\")\n+ self.author = self.match[\"channel_name\"]\n log.debug(f\"Channel name: {self.author}\")\n \n data = self.session.http.get(\n", "issue": "plugins.vkplay: vkplay.live has moved to another domain (live.vkplay.ru)\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nstreamlink 6.7.2\n\n### Description\n\nhttps://vk.com/wall-212496568_91026\r\n\r\nyesterday, vkplay live changed its domain. if you specify the old domain in the link to the stream, then everything still works, but on the site itself there are links to a new domain, to which the existing plugin does not respond.\r\n\r\nI just tried to change the updated part of the domain in the plugin code (vkplay.live -> live.vkplay.ru ), and everything seems to be working well. 
It's a bit difficult for me to create a pull request, but here's the corrected plugin on gist:\r\nhttps://gist.github.com/oexlkinq/eef0a260dddad473c5febafd91b980d9\r\n\r\nthe old domain is also listed in the documentation (https://streamlink.github.io/plugins.html#vkplay)\n\n### Debug log\n\n```text\nstreamlink https://live.vkplay.ru/ruwarface 720p --loglevel=debug\r\n[cli][debug] OS: Linux-6.8.1-arch1-1-x86_64-with-glibc2.39\r\n[cli][debug] Python: 3.11.8\r\n[cli][debug] OpenSSL: OpenSSL 3.2.1 30 Jan 2024\r\n[cli][debug] Streamlink: 6.7.2\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2024.2.2\r\n[cli][debug] exceptiongroup: 1.2.0\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 5.1.0\r\n[cli][debug] pycountry: 23.12.11\r\n[cli][debug] pycryptodome: 3.20.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] trio: 0.25.0\r\n[cli][debug] trio-websocket: 0.11.1\r\n[cli][debug] typing-extensions: 4.10.0\r\n[cli][debug] urllib3: 1.26.18\r\n[cli][debug] websocket-client: 1.7.0\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://live.vkplay.ru/ruwarface\r\n[cli][debug] stream=['720p']\r\n[cli][debug] --loglevel=debug\r\nerror: No plugin can handle URL: https://live.vkplay.ru/ruwarface\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Russian live-streaming platform for gaming and esports, owned by VKontakte.\n$url vkplay.live\n$type live\n$metadata id\n$metadata author\n$metadata category\n$metadata title\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://vkplay\\.live/(?P<channel_name>\\w+)/?$\",\n))\nclass VKplay(Plugin):\n API_URL = \"https://api.vkplay.live/v1\"\n\n def _get_streams(self):\n self.author = self.match.group(\"channel_name\")\n log.debug(f\"Channel name: {self.author}\")\n\n data = self.session.http.get(\n f\"{self.API_URL}/blog/{self.author}/public_video_stream\",\n headers={\"Referer\": self.url},\n acceptable_status=(200, 404),\n schema=validate.Schema(\n validate.parse_json(),\n validate.any(\n validate.all(\n {\"error\": str, \"error_description\": str},\n validate.get(\"error_description\"),\n ),\n validate.all(\n {\n validate.optional(\"category\"): validate.all(\n {\n \"title\": str,\n },\n validate.get(\"title\"),\n ),\n \"title\": str,\n \"data\": validate.any(\n [\n validate.all(\n {\n \"vid\": str,\n \"playerUrls\": [\n validate.all(\n {\n \"type\": str,\n \"url\": validate.any(\"\", validate.url()),\n },\n validate.union_get(\"type\", \"url\"),\n ),\n ],\n },\n validate.union_get(\"vid\", \"playerUrls\"),\n ),\n ],\n [],\n ),\n },\n validate.union_get(\n \"category\",\n \"title\",\n (\"data\", 0),\n ),\n ),\n ),\n ),\n )\n if isinstance(data, str):\n log.error(data)\n return\n\n self.category, self.title, streamdata = data\n if not streamdata:\n return\n\n self.id, streams = streamdata\n\n for streamtype, streamurl in streams:\n if streamurl and streamtype == \"live_hls\":\n return HLSStream.parse_variant_playlist(self.session, streamurl)\n\n\n__plugin__ = VKplay\n", "path": "src/streamlink/plugins/vkplay.py"}]} | 2,041 | 261 |
gh_patches_debug_3690 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-6318 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The HTML code of ePUBs is visible
I don't know whether this is a bug in zds-site or in zmd.
Create a post and publish it. The ePUB export contains the expected content, but the HTML tags are visible (which makes the file unreadable).
The bug is present with zmd 10 and 11.
</issue>
<code>
[start of zds/utils/templatetags/emarkdown.py]
1 import re
2 import json
3 import logging
4 from requests import post, HTTPError
5
6 from django import template
7 from django.conf import settings
8 from django.template.defaultfilters import stringfilter
9 from django.utils.safestring import mark_safe
10 from django.utils.translation import gettext_lazy as _
11
12 logger = logging.getLogger(__name__)
13 register = template.Library()
14 """
15 Markdown related filters.
16 """
17
18 # Constants
19 MAX_ATTEMPTS = 3
20 MD_PARSING_ERROR = _("Une erreur est survenue dans la génération de texte Markdown. Veuillez rapporter le bug.")
21
22 FORMAT_ENDPOINTS = {
23 "html": "/html",
24 "texfile": "/latex-document",
25 "epub": "/epub",
26 "tex": "/latex",
27 }
28
29
30 def _render_markdown_once(md_input, *, output_format="html", **kwargs):
31 """
32 Returns None on error (error details are logged). No retry mechanism.
33 """
34
35 def log_args():
36 logger.error(f"md_input: {md_input!r}")
37 logger.error(f"kwargs: {kwargs!r}")
38
39 inline = kwargs.get("inline", False) is True
40 full_json = kwargs.pop("full_json", False)
41
42 if settings.ZDS_APP["zmd"]["disable_pings"] is True:
43 kwargs["disable_ping"] = True
44
45 endpoint = FORMAT_ENDPOINTS[output_format]
46
47 try:
48 timeout = 10
49 real_input = str(md_input)
50 if output_format.startswith("tex") or full_json:
51 # latex may be really long to generate but it is also restrained by server configuration
52 timeout = 120
53 # use manifest renderer
54 real_input = md_input
55 response = post(
56 "{}{}".format(settings.ZDS_APP["zmd"]["server"], endpoint),
57 json={
58 "opts": kwargs,
59 "md": real_input,
60 },
61 timeout=timeout,
62 )
63 except HTTPError:
64 logger.exception("An HTTP error happened, markdown rendering failed")
65 log_args()
66 return "", {}, []
67
68 if response.status_code == 413:
69 return "", {}, [{"message": str(_("Texte trop volumineux."))}]
70
71 if response.status_code != 200:
72 logger.error(f"The markdown server replied with status {response.status_code} (expected 200)")
73 log_args()
74 return "", {}, []
75
76 try:
77 content, metadata, messages = response.json()
78 logger.debug("Result %s, %s, %s", content, metadata, messages)
79 if messages:
80 logger.error("Markdown errors %s", json.dumps(messages))
81 if isinstance(content, str):
82 content = content.strip()
83 if inline:
84 content = content.replace("</p>\n", "\n\n").replace("\n<p>", "\n")
85 if full_json:
86 return content, metadata, messages
87 return mark_safe(content), metadata, messages
88 except: # noqa
89 logger.exception("Unexpected exception raised")
90 log_args()
91 return "", {}, []
92
93
94 def render_markdown(md_input, *, on_error=None, disable_jsfiddle=True, **kwargs):
95 """Render a markdown string.
96
97 Returns a tuple ``(rendered_content, metadata)``, where
98 ``rendered_content`` is a string and ``metadata`` is a dict.
99
100 Handles errors gracefully by returning an user-friendly HTML
101 string which explains that the Markdown rendering has failed
102 (without any technical details).
103
104 """
105 opts = {"disable_jsfiddle": disable_jsfiddle}
106 opts.update(kwargs)
107 content, metadata, messages = _render_markdown_once(md_input, **opts)
108 if messages and on_error:
109 on_error([m["message"] for m in messages])
110 if content is not None:
111 # Success!
112 return content, metadata, messages
113
114 # Oops, something went wrong
115
116 attempts = kwargs.get("attempts", 0)
117 inline = kwargs.get("inline", False) is True
118
119 if attempts < MAX_ATTEMPTS:
120 if not kwargs:
121 kwargs = dict()
122 return render_markdown(md_input, **dict(kwargs, attempts=attempts + 1))
123
124 logger.error("Max attempt count reached, giving up")
125 logger.error(f"md_input: {md_input!r}")
126 logger.error(f"kwargs: {kwargs!r}")
127
128 # FIXME: This cannot work with LaTeX.
129 if inline:
130 return mark_safe(f"<p>{json.dumps(messages)}</p>"), metadata, []
131 else:
132 return mark_safe(f'<div class="error ico-after"><p>{json.dumps(messages)}</p></div>'), metadata, []
133
134
135 def render_markdown_stats(md_input, **kwargs):
136 """
137 Returns contents statistics (words and chars)
138 """
139 kwargs["stats"] = True
140 kwargs["disable_images_download"] = True
141 logger.setLevel(logging.INFO)
142 content, metadata, messages = _render_markdown_once(md_input, output_format="tex", **kwargs)
143 if metadata:
144 return metadata.get("stats", {}).get("signs", {})
145 return None
146
147
148 @register.filter(name="epub_markdown", needs_autoescape=False)
149 def epub_markdown(md_input, image_directory):
150 media_root = str(settings.MEDIA_ROOT)
151 if not media_root.endswith("/"):
152 media_root += "/"
153 replaced_media_url = settings.MEDIA_URL
154 if replaced_media_url.startswith("/"):
155 replaced_media_url = replaced_media_url[1:]
156 return (
157 emarkdown(
158 md_input,
159 output_format="epub",
160 images_download_dir=image_directory.absolute,
161 local_url_to_local_path=[settings.MEDIA_URL + "galleries/[0-9]+", image_directory.relative],
162 )
163 .replace('src"/', f'src="{media_root}')
164 .replace(f'src="{media_root}{replaced_media_url}', f'src="{media_root}')
165 )
166
167
168 @register.filter(needs_autoescape=False)
169 @stringfilter
170 def emarkdown(md_input, use_jsfiddle="", **kwargs):
171 """
172 :param str md_input: Markdown string.
173 :return: HTML string.
174 :rtype: str
175 """
176 disable_jsfiddle = use_jsfiddle != "js"
177 content, metadata, messages = render_markdown(
178 md_input,
179 on_error=lambda m: logger.error("Markdown errors %s", str(m)),
180 **dict(kwargs, disable_jsfiddle=disable_jsfiddle),
181 )
182 kwargs.get("metadata", {}).update(metadata)
183 return content or ""
184
185
186 @register.filter(needs_autoescape=False)
187 @stringfilter
188 def emarkdown_preview(md_input, use_jsfiddle="", **kwargs):
189 """
190 Filter markdown string and render it to html.
191
192 :param str md_input: Markdown string.
193 :return: HTML string.
194 :rtype: str
195 """
196 disable_jsfiddle = use_jsfiddle != "js"
197
198 content, metadata, messages = render_markdown(md_input, **dict(kwargs, disable_jsfiddle=disable_jsfiddle))
199
200 if messages:
201 content = _(
202 '</div><div class="preview-error"><strong>Erreur du serveur Markdown:</strong>\n{}'.format(
203 "<br>- ".join([m["message"] for m in messages])
204 )
205 )
206 content = mark_safe(content)
207
208 return content
209
210
211 @register.filter(needs_autoescape=False)
212 @stringfilter
213 def emarkdown_inline(text):
214 """
215 Parses inline elements only and renders HTML. Mainly for member signatures.
216 Although they are inline elements, pings are disabled.
217
218 :param str text: Markdown string.
219 :return: HTML string.
220 :rtype: str
221 """
222 rendered = emarkdown(text, inline=True)
223 return mark_safe(rendered.replace("<a href=", '<a rel="nofollow" href='))
224
225
226 def sub_hd(match, count):
227 """Replace header shifted."""
228 subt = match.group(1)
229 lvl = match.group("level")
230 header = match.group("header")
231 end = match.group(4)
232
233 new_content = subt + "#" * count + lvl + header + end
234
235 return new_content
236
237
238 def shift_heading(text, count):
239 """
240 Shift header in markdown document.
241
242 :param str text: Text to filter.
243 :param int count:
244 :return: Filtered text.
245 :rtype: str
246 """
247 text_by_code = re.split("(```|~~~)", text)
248 starting_code = None
249 for i, element in enumerate(text_by_code):
250 if element in ["```", "~~~"] and not starting_code:
251 starting_code = element
252 elif element == starting_code:
253 starting_code = None
254 elif starting_code is None:
255 text_by_code[i] = re.sub(
256 r"(^|\n)(?P<level>#{1,4})(?P<header>.*?)#*(\n|$)", lambda t: sub_hd(t, count), text_by_code[i]
257 )
258
259 return "".join(text_by_code)
260
261
262 @register.filter("shift_heading_1")
263 def shift_heading_1(text):
264 return shift_heading(text, 1)
265
266
267 @register.filter("shift_heading_2")
268 def shift_heading_2(text):
269 return shift_heading(text, 2)
270
271
272 @register.filter("shift_heading_3")
273 def shift_heading_3(text):
274 return shift_heading(text, 3)
275
[end of zds/utils/templatetags/emarkdown.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/utils/templatetags/emarkdown.py b/zds/utils/templatetags/emarkdown.py
--- a/zds/utils/templatetags/emarkdown.py
+++ b/zds/utils/templatetags/emarkdown.py
@@ -153,7 +153,7 @@
replaced_media_url = settings.MEDIA_URL
if replaced_media_url.startswith("/"):
replaced_media_url = replaced_media_url[1:]
- return (
+ return mark_safe(
emarkdown(
md_input,
output_format="epub",
| {"golden_diff": "diff --git a/zds/utils/templatetags/emarkdown.py b/zds/utils/templatetags/emarkdown.py\n--- a/zds/utils/templatetags/emarkdown.py\n+++ b/zds/utils/templatetags/emarkdown.py\n@@ -153,7 +153,7 @@\n replaced_media_url = settings.MEDIA_URL\n if replaced_media_url.startswith(\"/\"):\n replaced_media_url = replaced_media_url[1:]\n- return (\n+ return mark_safe(\n emarkdown(\n md_input,\n output_format=\"epub\",\n", "issue": "Le code HTML des ePUBs est visible\nJe ne sais pas si c'est un bug de zds-site ou zmd.\r\n\r\nCr\u00e9er un billet, le publier. L'export au format ePUB contient le contenu attendu, mais on voit les balises HTML (ce qui rend le fichier illisible).\r\n\r\nLe bug est pr\u00e9sent avec zmd 10 et 11.\r\n\n", "before_files": [{"content": "import re\nimport json\nimport logging\nfrom requests import post, HTTPError\n\nfrom django import template\nfrom django.conf import settings\nfrom django.template.defaultfilters import stringfilter\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import gettext_lazy as _\n\nlogger = logging.getLogger(__name__)\nregister = template.Library()\n\"\"\"\nMarkdown related filters.\n\"\"\"\n\n# Constants\nMAX_ATTEMPTS = 3\nMD_PARSING_ERROR = _(\"Une erreur est survenue dans la g\u00e9n\u00e9ration de texte Markdown. Veuillez rapporter le bug.\")\n\nFORMAT_ENDPOINTS = {\n \"html\": \"/html\",\n \"texfile\": \"/latex-document\",\n \"epub\": \"/epub\",\n \"tex\": \"/latex\",\n}\n\n\ndef _render_markdown_once(md_input, *, output_format=\"html\", **kwargs):\n \"\"\"\n Returns None on error (error details are logged). No retry mechanism.\n \"\"\"\n\n def log_args():\n logger.error(f\"md_input: {md_input!r}\")\n logger.error(f\"kwargs: {kwargs!r}\")\n\n inline = kwargs.get(\"inline\", False) is True\n full_json = kwargs.pop(\"full_json\", False)\n\n if settings.ZDS_APP[\"zmd\"][\"disable_pings\"] is True:\n kwargs[\"disable_ping\"] = True\n\n endpoint = FORMAT_ENDPOINTS[output_format]\n\n try:\n timeout = 10\n real_input = str(md_input)\n if output_format.startswith(\"tex\") or full_json:\n # latex may be really long to generate but it is also restrained by server configuration\n timeout = 120\n # use manifest renderer\n real_input = md_input\n response = post(\n \"{}{}\".format(settings.ZDS_APP[\"zmd\"][\"server\"], endpoint),\n json={\n \"opts\": kwargs,\n \"md\": real_input,\n },\n timeout=timeout,\n )\n except HTTPError:\n logger.exception(\"An HTTP error happened, markdown rendering failed\")\n log_args()\n return \"\", {}, []\n\n if response.status_code == 413:\n return \"\", {}, [{\"message\": str(_(\"Texte trop volumineux.\"))}]\n\n if response.status_code != 200:\n logger.error(f\"The markdown server replied with status {response.status_code} (expected 200)\")\n log_args()\n return \"\", {}, []\n\n try:\n content, metadata, messages = response.json()\n logger.debug(\"Result %s, %s, %s\", content, metadata, messages)\n if messages:\n logger.error(\"Markdown errors %s\", json.dumps(messages))\n if isinstance(content, str):\n content = content.strip()\n if inline:\n content = content.replace(\"</p>\\n\", \"\\n\\n\").replace(\"\\n<p>\", \"\\n\")\n if full_json:\n return content, metadata, messages\n return mark_safe(content), metadata, messages\n except: # noqa\n logger.exception(\"Unexpected exception raised\")\n log_args()\n return \"\", {}, []\n\n\ndef render_markdown(md_input, *, on_error=None, disable_jsfiddle=True, **kwargs):\n \"\"\"Render a markdown string.\n\n Returns a tuple ``(rendered_content, 
metadata)``, where\n ``rendered_content`` is a string and ``metadata`` is a dict.\n\n Handles errors gracefully by returning an user-friendly HTML\n string which explains that the Markdown rendering has failed\n (without any technical details).\n\n \"\"\"\n opts = {\"disable_jsfiddle\": disable_jsfiddle}\n opts.update(kwargs)\n content, metadata, messages = _render_markdown_once(md_input, **opts)\n if messages and on_error:\n on_error([m[\"message\"] for m in messages])\n if content is not None:\n # Success!\n return content, metadata, messages\n\n # Oops, something went wrong\n\n attempts = kwargs.get(\"attempts\", 0)\n inline = kwargs.get(\"inline\", False) is True\n\n if attempts < MAX_ATTEMPTS:\n if not kwargs:\n kwargs = dict()\n return render_markdown(md_input, **dict(kwargs, attempts=attempts + 1))\n\n logger.error(\"Max attempt count reached, giving up\")\n logger.error(f\"md_input: {md_input!r}\")\n logger.error(f\"kwargs: {kwargs!r}\")\n\n # FIXME: This cannot work with LaTeX.\n if inline:\n return mark_safe(f\"<p>{json.dumps(messages)}</p>\"), metadata, []\n else:\n return mark_safe(f'<div class=\"error ico-after\"><p>{json.dumps(messages)}</p></div>'), metadata, []\n\n\ndef render_markdown_stats(md_input, **kwargs):\n \"\"\"\n Returns contents statistics (words and chars)\n \"\"\"\n kwargs[\"stats\"] = True\n kwargs[\"disable_images_download\"] = True\n logger.setLevel(logging.INFO)\n content, metadata, messages = _render_markdown_once(md_input, output_format=\"tex\", **kwargs)\n if metadata:\n return metadata.get(\"stats\", {}).get(\"signs\", {})\n return None\n\n\[email protected](name=\"epub_markdown\", needs_autoescape=False)\ndef epub_markdown(md_input, image_directory):\n media_root = str(settings.MEDIA_ROOT)\n if not media_root.endswith(\"/\"):\n media_root += \"/\"\n replaced_media_url = settings.MEDIA_URL\n if replaced_media_url.startswith(\"/\"):\n replaced_media_url = replaced_media_url[1:]\n return (\n emarkdown(\n md_input,\n output_format=\"epub\",\n images_download_dir=image_directory.absolute,\n local_url_to_local_path=[settings.MEDIA_URL + \"galleries/[0-9]+\", image_directory.relative],\n )\n .replace('src\"/', f'src=\"{media_root}')\n .replace(f'src=\"{media_root}{replaced_media_url}', f'src=\"{media_root}')\n )\n\n\[email protected](needs_autoescape=False)\n@stringfilter\ndef emarkdown(md_input, use_jsfiddle=\"\", **kwargs):\n \"\"\"\n :param str md_input: Markdown string.\n :return: HTML string.\n :rtype: str\n \"\"\"\n disable_jsfiddle = use_jsfiddle != \"js\"\n content, metadata, messages = render_markdown(\n md_input,\n on_error=lambda m: logger.error(\"Markdown errors %s\", str(m)),\n **dict(kwargs, disable_jsfiddle=disable_jsfiddle),\n )\n kwargs.get(\"metadata\", {}).update(metadata)\n return content or \"\"\n\n\[email protected](needs_autoescape=False)\n@stringfilter\ndef emarkdown_preview(md_input, use_jsfiddle=\"\", **kwargs):\n \"\"\"\n Filter markdown string and render it to html.\n\n :param str md_input: Markdown string.\n :return: HTML string.\n :rtype: str\n \"\"\"\n disable_jsfiddle = use_jsfiddle != \"js\"\n\n content, metadata, messages = render_markdown(md_input, **dict(kwargs, disable_jsfiddle=disable_jsfiddle))\n\n if messages:\n content = _(\n '</div><div class=\"preview-error\"><strong>Erreur du serveur Markdown:</strong>\\n{}'.format(\n \"<br>- \".join([m[\"message\"] for m in messages])\n )\n )\n content = mark_safe(content)\n\n return content\n\n\[email protected](needs_autoescape=False)\n@stringfilter\ndef emarkdown_inline(text):\n 
\"\"\"\n Parses inline elements only and renders HTML. Mainly for member signatures.\n Although they are inline elements, pings are disabled.\n\n :param str text: Markdown string.\n :return: HTML string.\n :rtype: str\n \"\"\"\n rendered = emarkdown(text, inline=True)\n return mark_safe(rendered.replace(\"<a href=\", '<a rel=\"nofollow\" href='))\n\n\ndef sub_hd(match, count):\n \"\"\"Replace header shifted.\"\"\"\n subt = match.group(1)\n lvl = match.group(\"level\")\n header = match.group(\"header\")\n end = match.group(4)\n\n new_content = subt + \"#\" * count + lvl + header + end\n\n return new_content\n\n\ndef shift_heading(text, count):\n \"\"\"\n Shift header in markdown document.\n\n :param str text: Text to filter.\n :param int count:\n :return: Filtered text.\n :rtype: str\n \"\"\"\n text_by_code = re.split(\"(```|~~~)\", text)\n starting_code = None\n for i, element in enumerate(text_by_code):\n if element in [\"```\", \"~~~\"] and not starting_code:\n starting_code = element\n elif element == starting_code:\n starting_code = None\n elif starting_code is None:\n text_by_code[i] = re.sub(\n r\"(^|\\n)(?P<level>#{1,4})(?P<header>.*?)#*(\\n|$)\", lambda t: sub_hd(t, count), text_by_code[i]\n )\n\n return \"\".join(text_by_code)\n\n\[email protected](\"shift_heading_1\")\ndef shift_heading_1(text):\n return shift_heading(text, 1)\n\n\[email protected](\"shift_heading_2\")\ndef shift_heading_2(text):\n return shift_heading(text, 2)\n\n\[email protected](\"shift_heading_3\")\ndef shift_heading_3(text):\n return shift_heading(text, 3)\n", "path": "zds/utils/templatetags/emarkdown.py"}]} | 3,368 | 128 |
gh_patches_debug_30683 | rasdani/github-patches | git_diff | scikit-image__scikit-image-4313 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docstring in util.pad returns <no docstring>
## Description
`util.pad()` returns an empty docstring; however, [the docstring is there](https://github.com/scikit-image/scikit-image/blob/v0.12.2/skimage/util/arraypad.py#L1117).
Any idea on what's happening? Thanks y'all!
## Way to reproduce
```python
In [1]: from skimage import util
In [2]: util.pad?
Signature: util.pad(*args, **kwargs)
Docstring: <no docstring>
File: ~/<__array_function__ internals>
Type: function
```
## Version information
```python
# Paste the output of the following python commands
>>> from __future__ import print_function
>>> import sys; print(sys.version)
3.7.4 (default, Aug 13 2019, 20:35:49)
[GCC 7.3.0]
>>> import platform; print(platform.platform())
Linux-5.3.0-20-generic-x86_64-with-debian-buster-sid
>>> import skimage; print("scikit-image version: {}".format(skimage.__version__))
scikit-image version: 0.16.2
>>> import numpy; print("numpy version: {}".format(numpy.__version__))
numpy version: 1.17.3
```
</issue>
<code>
[start of skimage/util/__init__.py]
1 from .dtype import (img_as_float32, img_as_float64, img_as_float,
2 img_as_int, img_as_uint, img_as_ubyte,
3 img_as_bool, dtype_limits)
4 from .shape import view_as_blocks, view_as_windows
5 from .noise import random_noise
6 from .apply_parallel import apply_parallel
7
8 from .arraycrop import crop
9 from .compare import compare_images
10 from ._regular_grid import regular_grid, regular_seeds
11 from .unique import unique_rows
12 from ._invert import invert
13 from ._montage import montage
14
15 from .._shared.utils import copy_func
16
17 from numpy import pad as numpy_pad
18 pad = copy_func(numpy_pad, name='pad')
19
20
21 __all__ = ['img_as_float32',
22 'img_as_float64',
23 'img_as_float',
24 'img_as_int',
25 'img_as_uint',
26 'img_as_ubyte',
27 'img_as_bool',
28 'dtype_limits',
29 'view_as_blocks',
30 'view_as_windows',
31 'pad',
32 'crop',
33 'compare_images',
34 'montage',
35 'random_noise',
36 'regular_grid',
37 'regular_seeds',
38 'apply_parallel',
39 'invert',
40 'unique_rows',
41 ]
42
[end of skimage/util/__init__.py]
[start of skimage/_shared/utils.py]
1 import warnings
2 import functools
3 import sys
4 import numpy as np
5 import types
6 import numbers
7
8 from ..util import img_as_float
9 from ._warnings import all_warnings, warn
10
11 __all__ = ['deprecated', 'get_bound_method_class', 'all_warnings',
12 'safe_as_int', 'check_nD', 'check_shape_equality', 'warn']
13
14
15 class skimage_deprecation(Warning):
16 """Create our own deprecation class, since Python >= 2.7
17 silences deprecations by default.
18
19 """
20 pass
21
22
23 class deprecated(object):
24 """Decorator to mark deprecated functions with warning.
25
26 Adapted from <http://wiki.python.org/moin/PythonDecoratorLibrary>.
27
28 Parameters
29 ----------
30 alt_func : str
31 If given, tell user what function to use instead.
32 behavior : {'warn', 'raise'}
33 Behavior during call to deprecated function: 'warn' = warn user that
34 function is deprecated; 'raise' = raise error.
35 removed_version : str
36 The package version in which the deprecated function will be removed.
37 """
38
39 def __init__(self, alt_func=None, behavior='warn', removed_version=None):
40 self.alt_func = alt_func
41 self.behavior = behavior
42 self.removed_version = removed_version
43
44 def __call__(self, func):
45
46 alt_msg = ''
47 if self.alt_func is not None:
48 alt_msg = ' Use ``%s`` instead.' % self.alt_func
49 rmv_msg = ''
50 if self.removed_version is not None:
51 rmv_msg = (' and will be removed in version %s' %
52 self.removed_version)
53
54 msg = ('Function ``%s`` is deprecated' % func.__name__ +
55 rmv_msg + '.' + alt_msg)
56
57 @functools.wraps(func)
58 def wrapped(*args, **kwargs):
59 if self.behavior == 'warn':
60 func_code = func.__code__
61 warnings.simplefilter('always', skimage_deprecation)
62 warnings.warn_explicit(msg,
63 category=skimage_deprecation,
64 filename=func_code.co_filename,
65 lineno=func_code.co_firstlineno + 1)
66 elif self.behavior == 'raise':
67 raise skimage_deprecation(msg)
68 return func(*args, **kwargs)
69
70 # modify doc string to display deprecation warning
71 doc = '**Deprecated function**.' + alt_msg
72 if wrapped.__doc__ is None:
73 wrapped.__doc__ = doc
74 else:
75 wrapped.__doc__ = doc + '\n\n ' + wrapped.__doc__
76
77 return wrapped
78
79
80 def get_bound_method_class(m):
81 """Return the class for a bound method.
82
83 """
84 return m.im_class if sys.version < '3' else m.__self__.__class__
85
86
87 def safe_as_int(val, atol=1e-3):
88 """
89 Attempt to safely cast values to integer format.
90
91 Parameters
92 ----------
93 val : scalar or iterable of scalars
94 Number or container of numbers which are intended to be interpreted as
95 integers, e.g., for indexing purposes, but which may not carry integer
96 type.
97 atol : float
98 Absolute tolerance away from nearest integer to consider values in
99 ``val`` functionally integers.
100
101 Returns
102 -------
103 val_int : NumPy scalar or ndarray of dtype `np.int64`
104 Returns the input value(s) coerced to dtype `np.int64` assuming all
105 were within ``atol`` of the nearest integer.
106
107 Notes
108 -----
109 This operation calculates ``val`` modulo 1, which returns the mantissa of
110 all values. Then all mantissas greater than 0.5 are subtracted from one.
111 Finally, the absolute tolerance from zero is calculated. If it is less
112 than ``atol`` for all value(s) in ``val``, they are rounded and returned
113 in an integer array. Or, if ``val`` was a scalar, a NumPy scalar type is
114 returned.
115
116 If any value(s) are outside the specified tolerance, an informative error
117 is raised.
118
119 Examples
120 --------
121 >>> safe_as_int(7.0)
122 7
123
124 >>> safe_as_int([9, 4, 2.9999999999])
125 array([9, 4, 3])
126
127 >>> safe_as_int(53.1)
128 Traceback (most recent call last):
129 ...
130 ValueError: Integer argument required but received 53.1, check inputs.
131
132 >>> safe_as_int(53.01, atol=0.01)
133 53
134
135 """
136 mod = np.asarray(val) % 1 # Extract mantissa
137
138 # Check for and subtract any mod values > 0.5 from 1
139 if mod.ndim == 0: # Scalar input, cannot be indexed
140 if mod > 0.5:
141 mod = 1 - mod
142 else: # Iterable input, now ndarray
143 mod[mod > 0.5] = 1 - mod[mod > 0.5] # Test on each side of nearest int
144
145 try:
146 np.testing.assert_allclose(mod, 0, atol=atol)
147 except AssertionError:
148 raise ValueError("Integer argument required but received "
149 "{0}, check inputs.".format(val))
150
151 return np.round(val).astype(np.int64)
152
153
154 def check_shape_equality(im1, im2):
155 """Raise an error if the shape do not match."""
156 if not im1.shape == im2.shape:
157 raise ValueError('Input images must have the same dimensions.')
158 return
159
160
161 def check_nD(array, ndim, arg_name='image'):
162 """
163 Verify an array meets the desired ndims and array isn't empty.
164
165 Parameters
166 ----------
167 array : array-like
168 Input array to be validated
169 ndim : int or iterable of ints
170 Allowable ndim or ndims for the array.
171 arg_name : str, optional
172 The name of the array in the original function.
173
174 """
175 array = np.asanyarray(array)
176 msg_incorrect_dim = "The parameter `%s` must be a %s-dimensional array"
177 msg_empty_array = "The parameter `%s` cannot be an empty array"
178 if isinstance(ndim, int):
179 ndim = [ndim]
180 if array.size == 0:
181 raise ValueError(msg_empty_array % (arg_name))
182 if not array.ndim in ndim:
183 raise ValueError(msg_incorrect_dim % (arg_name, '-or-'.join([str(n) for n in ndim])))
184
185
186 def copy_func(f, name=None):
187 """Create a copy of a function.
188
189 Parameters
190 ----------
191 f : function
192 Function to copy.
193 name : str, optional
194 Name of new function.
195
196 """
197 return types.FunctionType(f.__code__, f.__globals__, name or f.__name__,
198 f.__defaults__, f.__closure__)
199
200
201 def check_random_state(seed):
202 """Turn seed into a `np.random.RandomState` instance.
203
204 Parameters
205 ----------
206 seed : None, int or np.random.RandomState
207 If `seed` is None, return the RandomState singleton used by `np.random`.
208 If `seed` is an int, return a new RandomState instance seeded with `seed`.
209 If `seed` is already a RandomState instance, return it.
210
211 Raises
212 ------
213 ValueError
214 If `seed` is of the wrong type.
215
216 """
217 # Function originally from scikit-learn's module sklearn.utils.validation
218 if seed is None or seed is np.random:
219 return np.random.mtrand._rand
220 if isinstance(seed, (numbers.Integral, np.integer)):
221 return np.random.RandomState(seed)
222 if isinstance(seed, np.random.RandomState):
223 return seed
224 raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
225 ' instance' % seed)
226
227
228 def convert_to_float(image, preserve_range):
229 """Convert input image to float image with the appropriate range.
230
231 Parameters
232 ----------
233 image : ndarray
234 Input image.
235 preserve_range : bool
236 Determines if the range of the image should be kept or transformed
237 using img_as_float. Also see
238 https://scikit-image.org/docs/dev/user_guide/data_types.html
239
240 Notes:
241 ------
242 * Input images with `float32` data type are not upcast.
243
244 Returns
245 -------
246 image : ndarray
247 Transformed version of the input.
248
249 """
250 if preserve_range:
251 # Convert image to double only if it is not single or double
252 # precision float
253 if image.dtype.char not in 'df':
254 image = image.astype(float)
255 else:
256 image = img_as_float(image)
257 return image
258
[end of skimage/_shared/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/skimage/_shared/utils.py b/skimage/_shared/utils.py
--- a/skimage/_shared/utils.py
+++ b/skimage/_shared/utils.py
@@ -2,7 +2,6 @@
import functools
import sys
import numpy as np
-import types
import numbers
from ..util import img_as_float
@@ -183,21 +182,6 @@
raise ValueError(msg_incorrect_dim % (arg_name, '-or-'.join([str(n) for n in ndim])))
-def copy_func(f, name=None):
- """Create a copy of a function.
-
- Parameters
- ----------
- f : function
- Function to copy.
- name : str, optional
- Name of new function.
-
- """
- return types.FunctionType(f.__code__, f.__globals__, name or f.__name__,
- f.__defaults__, f.__closure__)
-
-
def check_random_state(seed):
"""Turn seed into a `np.random.RandomState` instance.
diff --git a/skimage/util/__init__.py b/skimage/util/__init__.py
--- a/skimage/util/__init__.py
+++ b/skimage/util/__init__.py
@@ -1,3 +1,6 @@
+import functools
+import warnings
+import numpy as np
from .dtype import (img_as_float32, img_as_float64, img_as_float,
img_as_int, img_as_uint, img_as_ubyte,
img_as_bool, dtype_limits)
@@ -12,10 +15,13 @@
from ._invert import invert
from ._montage import montage
-from .._shared.utils import copy_func
-from numpy import pad as numpy_pad
-pad = copy_func(numpy_pad, name='pad')
[email protected](np.pad)
+def pad(*args, **kwargs):
+ warnings.warn("skimage.util.pad is deprecated and will be removed in "
+ "version 0.19. Please use numpy.pad instead.",
+ FutureWarning, stacklevel=2)
+ return np.pad(*args, **kwargs)
__all__ = ['img_as_float32',
| {"golden_diff": "diff --git a/skimage/_shared/utils.py b/skimage/_shared/utils.py\n--- a/skimage/_shared/utils.py\n+++ b/skimage/_shared/utils.py\n@@ -2,7 +2,6 @@\n import functools\n import sys\n import numpy as np\n-import types\n import numbers\n \n from ..util import img_as_float\n@@ -183,21 +182,6 @@\n raise ValueError(msg_incorrect_dim % (arg_name, '-or-'.join([str(n) for n in ndim])))\n \n \n-def copy_func(f, name=None):\n- \"\"\"Create a copy of a function.\n-\n- Parameters\n- ----------\n- f : function\n- Function to copy.\n- name : str, optional\n- Name of new function.\n-\n- \"\"\"\n- return types.FunctionType(f.__code__, f.__globals__, name or f.__name__,\n- f.__defaults__, f.__closure__)\n-\n-\n def check_random_state(seed):\n \"\"\"Turn seed into a `np.random.RandomState` instance.\n \ndiff --git a/skimage/util/__init__.py b/skimage/util/__init__.py\n--- a/skimage/util/__init__.py\n+++ b/skimage/util/__init__.py\n@@ -1,3 +1,6 @@\n+import functools\n+import warnings\n+import numpy as np\n from .dtype import (img_as_float32, img_as_float64, img_as_float,\n img_as_int, img_as_uint, img_as_ubyte,\n img_as_bool, dtype_limits)\n@@ -12,10 +15,13 @@\n from ._invert import invert\n from ._montage import montage\n \n-from .._shared.utils import copy_func\n \n-from numpy import pad as numpy_pad\n-pad = copy_func(numpy_pad, name='pad')\[email protected](np.pad)\n+def pad(*args, **kwargs):\n+ warnings.warn(\"skimage.util.pad is deprecated and will be removed in \"\n+ \"version 0.19. Please use numpy.pad instead.\",\n+ FutureWarning, stacklevel=2)\n+ return np.pad(*args, **kwargs)\n \n \n __all__ = ['img_as_float32',\n", "issue": "Docstring in util.pad returns <no docstring>\n## Description\r\n\r\n`util.pad()` returns an empty docstring; however, [the docstring is there](https://github.com/scikit-image/scikit-image/blob/v0.12.2/skimage/util/arraypad.py#L1117).\r\nAny idea on what's happening? Thanks y'all!\r\n\r\n## Way to reproduce\r\n```python\r\nIn [1]: from skimage import util \r\n\r\nIn [2]: util.pad? 
\r\nSignature: util.pad(*args, **kwargs)\r\nDocstring: <no docstring>\r\nFile: ~/<__array_function__ internals>\r\nType: function\r\n```\r\n\r\n\r\n## Version information\r\n```python\r\n# Paste the output of the following python commands\r\n>>> from __future__ import print_function\r\n\r\n>>> import sys; print(sys.version)\r\n3.7.4 (default, Aug 13 2019, 20:35:49) \r\n[GCC 7.3.0]\r\n\r\n>>> import platform; print(platform.platform())\r\nLinux-5.3.0-20-generic-x86_64-with-debian-buster-sid\r\n\r\n>>> import skimage; print(\"scikit-image version: {}\".format(skimage.__version__))\r\nscikit-image version: 0.16.2\r\n\r\n>>> import numpy; print(\"numpy version: {}\".format(numpy.__version__))\r\nnumpy version: 1.17.3\r\n```\n", "before_files": [{"content": "from .dtype import (img_as_float32, img_as_float64, img_as_float,\n img_as_int, img_as_uint, img_as_ubyte,\n img_as_bool, dtype_limits)\nfrom .shape import view_as_blocks, view_as_windows\nfrom .noise import random_noise\nfrom .apply_parallel import apply_parallel\n\nfrom .arraycrop import crop\nfrom .compare import compare_images\nfrom ._regular_grid import regular_grid, regular_seeds\nfrom .unique import unique_rows\nfrom ._invert import invert\nfrom ._montage import montage\n\nfrom .._shared.utils import copy_func\n\nfrom numpy import pad as numpy_pad\npad = copy_func(numpy_pad, name='pad')\n\n\n__all__ = ['img_as_float32',\n 'img_as_float64',\n 'img_as_float',\n 'img_as_int',\n 'img_as_uint',\n 'img_as_ubyte',\n 'img_as_bool',\n 'dtype_limits',\n 'view_as_blocks',\n 'view_as_windows',\n 'pad',\n 'crop',\n 'compare_images',\n 'montage',\n 'random_noise',\n 'regular_grid',\n 'regular_seeds',\n 'apply_parallel',\n 'invert',\n 'unique_rows',\n ]\n", "path": "skimage/util/__init__.py"}, {"content": "import warnings\nimport functools\nimport sys\nimport numpy as np\nimport types\nimport numbers\n\nfrom ..util import img_as_float\nfrom ._warnings import all_warnings, warn\n\n__all__ = ['deprecated', 'get_bound_method_class', 'all_warnings',\n 'safe_as_int', 'check_nD', 'check_shape_equality', 'warn']\n\n\nclass skimage_deprecation(Warning):\n \"\"\"Create our own deprecation class, since Python >= 2.7\n silences deprecations by default.\n\n \"\"\"\n pass\n\n\nclass deprecated(object):\n \"\"\"Decorator to mark deprecated functions with warning.\n\n Adapted from <http://wiki.python.org/moin/PythonDecoratorLibrary>.\n\n Parameters\n ----------\n alt_func : str\n If given, tell user what function to use instead.\n behavior : {'warn', 'raise'}\n Behavior during call to deprecated function: 'warn' = warn user that\n function is deprecated; 'raise' = raise error.\n removed_version : str\n The package version in which the deprecated function will be removed.\n \"\"\"\n\n def __init__(self, alt_func=None, behavior='warn', removed_version=None):\n self.alt_func = alt_func\n self.behavior = behavior\n self.removed_version = removed_version\n\n def __call__(self, func):\n\n alt_msg = ''\n if self.alt_func is not None:\n alt_msg = ' Use ``%s`` instead.' % self.alt_func\n rmv_msg = ''\n if self.removed_version is not None:\n rmv_msg = (' and will be removed in version %s' %\n self.removed_version)\n\n msg = ('Function ``%s`` is deprecated' % func.__name__ +\n rmv_msg + '.' 
+ alt_msg)\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n if self.behavior == 'warn':\n func_code = func.__code__\n warnings.simplefilter('always', skimage_deprecation)\n warnings.warn_explicit(msg,\n category=skimage_deprecation,\n filename=func_code.co_filename,\n lineno=func_code.co_firstlineno + 1)\n elif self.behavior == 'raise':\n raise skimage_deprecation(msg)\n return func(*args, **kwargs)\n\n # modify doc string to display deprecation warning\n doc = '**Deprecated function**.' + alt_msg\n if wrapped.__doc__ is None:\n wrapped.__doc__ = doc\n else:\n wrapped.__doc__ = doc + '\\n\\n ' + wrapped.__doc__\n\n return wrapped\n\n\ndef get_bound_method_class(m):\n \"\"\"Return the class for a bound method.\n\n \"\"\"\n return m.im_class if sys.version < '3' else m.__self__.__class__\n\n\ndef safe_as_int(val, atol=1e-3):\n \"\"\"\n Attempt to safely cast values to integer format.\n\n Parameters\n ----------\n val : scalar or iterable of scalars\n Number or container of numbers which are intended to be interpreted as\n integers, e.g., for indexing purposes, but which may not carry integer\n type.\n atol : float\n Absolute tolerance away from nearest integer to consider values in\n ``val`` functionally integers.\n\n Returns\n -------\n val_int : NumPy scalar or ndarray of dtype `np.int64`\n Returns the input value(s) coerced to dtype `np.int64` assuming all\n were within ``atol`` of the nearest integer.\n\n Notes\n -----\n This operation calculates ``val`` modulo 1, which returns the mantissa of\n all values. Then all mantissas greater than 0.5 are subtracted from one.\n Finally, the absolute tolerance from zero is calculated. If it is less\n than ``atol`` for all value(s) in ``val``, they are rounded and returned\n in an integer array. 
Or, if ``val`` was a scalar, a NumPy scalar type is\n returned.\n\n If any value(s) are outside the specified tolerance, an informative error\n is raised.\n\n Examples\n --------\n >>> safe_as_int(7.0)\n 7\n\n >>> safe_as_int([9, 4, 2.9999999999])\n array([9, 4, 3])\n\n >>> safe_as_int(53.1)\n Traceback (most recent call last):\n ...\n ValueError: Integer argument required but received 53.1, check inputs.\n\n >>> safe_as_int(53.01, atol=0.01)\n 53\n\n \"\"\"\n mod = np.asarray(val) % 1 # Extract mantissa\n\n # Check for and subtract any mod values > 0.5 from 1\n if mod.ndim == 0: # Scalar input, cannot be indexed\n if mod > 0.5:\n mod = 1 - mod\n else: # Iterable input, now ndarray\n mod[mod > 0.5] = 1 - mod[mod > 0.5] # Test on each side of nearest int\n\n try:\n np.testing.assert_allclose(mod, 0, atol=atol)\n except AssertionError:\n raise ValueError(\"Integer argument required but received \"\n \"{0}, check inputs.\".format(val))\n\n return np.round(val).astype(np.int64)\n\n\ndef check_shape_equality(im1, im2):\n \"\"\"Raise an error if the shape do not match.\"\"\"\n if not im1.shape == im2.shape:\n raise ValueError('Input images must have the same dimensions.')\n return\n\n\ndef check_nD(array, ndim, arg_name='image'):\n \"\"\"\n Verify an array meets the desired ndims and array isn't empty.\n\n Parameters\n ----------\n array : array-like\n Input array to be validated\n ndim : int or iterable of ints\n Allowable ndim or ndims for the array.\n arg_name : str, optional\n The name of the array in the original function.\n\n \"\"\"\n array = np.asanyarray(array)\n msg_incorrect_dim = \"The parameter `%s` must be a %s-dimensional array\"\n msg_empty_array = \"The parameter `%s` cannot be an empty array\"\n if isinstance(ndim, int):\n ndim = [ndim]\n if array.size == 0:\n raise ValueError(msg_empty_array % (arg_name))\n if not array.ndim in ndim:\n raise ValueError(msg_incorrect_dim % (arg_name, '-or-'.join([str(n) for n in ndim])))\n\n\ndef copy_func(f, name=None):\n \"\"\"Create a copy of a function.\n\n Parameters\n ----------\n f : function\n Function to copy.\n name : str, optional\n Name of new function.\n\n \"\"\"\n return types.FunctionType(f.__code__, f.__globals__, name or f.__name__,\n f.__defaults__, f.__closure__)\n\n\ndef check_random_state(seed):\n \"\"\"Turn seed into a `np.random.RandomState` instance.\n\n Parameters\n ----------\n seed : None, int or np.random.RandomState\n If `seed` is None, return the RandomState singleton used by `np.random`.\n If `seed` is an int, return a new RandomState instance seeded with `seed`.\n If `seed` is already a RandomState instance, return it.\n\n Raises\n ------\n ValueError\n If `seed` is of the wrong type.\n\n \"\"\"\n # Function originally from scikit-learn's module sklearn.utils.validation\n if seed is None or seed is np.random:\n return np.random.mtrand._rand\n if isinstance(seed, (numbers.Integral, np.integer)):\n return np.random.RandomState(seed)\n if isinstance(seed, np.random.RandomState):\n return seed\n raise ValueError('%r cannot be used to seed a numpy.random.RandomState'\n ' instance' % seed)\n\n\ndef convert_to_float(image, preserve_range):\n \"\"\"Convert input image to float image with the appropriate range.\n\n Parameters\n ----------\n image : ndarray\n Input image.\n preserve_range : bool\n Determines if the range of the image should be kept or transformed\n using img_as_float. 
Also see\n https://scikit-image.org/docs/dev/user_guide/data_types.html\n\n Notes:\n ------\n * Input images with `float32` data type are not upcast.\n\n Returns\n -------\n image : ndarray\n Transformed version of the input.\n\n \"\"\"\n if preserve_range:\n # Convert image to double only if it is not single or double\n # precision float\n if image.dtype.char not in 'df':\n image = image.astype(float)\n else:\n image = img_as_float(image)\n return image\n", "path": "skimage/_shared/utils.py"}]} | 3,791 | 479 |
gh_patches_debug_7066 | rasdani/github-patches | git_diff | keras-team__keras-nlp-1211 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AutoGraph error upon model.fit() in GPT
**Describe the bug**
<details><summary>StackTrace</summary>
<p>
OperatorNotAllowedInGraphError Traceback (most recent call last)
[<ipython-input-10-6ef525f3a2b6>](https://localhost:8080/#) in <cell line: 1>()
----> 1 model.fit(train_ds, validation_data=val_ds, verbose=2, epochs=EPOCHS)
1 frames
[/usr/local/lib/python3.10/dist-packages/keras_core/src/utils/traceback_utils.py](https://localhost:8080/#) in error_handler(*args, **kwargs)
121 # To get the full stack trace, call:
122 # `keras_core.config.disable_traceback_filtering()`
--> 123 raise e.with_traceback(filtered_tb) from None
124 finally:
125 del filtered_tb
[/usr/local/lib/python3.10/dist-packages/keras_nlp/src/metrics/perplexity.py](https://localhost:8080/#) in result(self)
161
162 def result(self):
--> 163 if self._number_of_samples == 0:
164 return 0.0
165 perplexity_score = ops.exp(
OperatorNotAllowedInGraphError: Using a symbolic `tf.Tensor` as a Python `bool` is not allowed: AutoGraph is disabled in this function. Try decorating it directly with @tf.function.
</p>
</details>
**To Reproduce**
[Colab Notebook](https://colab.research.google.com/drive/1779-TFbLUbVapOl8BQS1aLJF2LlLT2ZS?usp=sharing)
**Would you like to help us fix it?**
Yes
</issue>
<code>
[start of keras_nlp/metrics/perplexity.py]
1 # Copyright 2023 The KerasNLP Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Perplexity metric."""
16
17 from keras_nlp.api_export import keras_nlp_export
18 from keras_nlp.backend import keras
19 from keras_nlp.backend import ops
20 from keras_nlp.utils.tensor_utils import is_floating_dtype
21
22
23 @keras_nlp_export("keras_nlp.metrics.Perplexity")
24 class Perplexity(keras.metrics.Metric):
25 """Perplexity metric.
26
27 This class implements the perplexity metric. In short, this class calculates
28 the cross entropy loss and takes its exponent.
29 Note: This implementation is not suitable for fixed-size windows.
30
31 Args:
32 from_logits: bool. If True, `y_pred` (input to `update_state()`) should
33 be the logits as returned by the model. Otherwise, `y_pred` is a
34 tensor of probabilities.
35 mask_token_id: int. ID of the token to be masked. If provided, the mask
36 is computed for this class. Note that if this field is provided, and
37 if the `sample_weight` field in `update_state()` is also provided,
38 we will compute the final `sample_weight` as the element-wise
39 product of the mask and the `sample_weight`.
40 dtype: string or tf.dtypes.Dtype. Precision of metric computation. If
41 not specified, it defaults to `"float32"`.
42 name: string. Name of the metric instance.
43 **kwargs: Other keyword arguments.
44
45 Examples:
46
47 1. Calculate perplexity by calling update_state() and result().
48 1.1. `sample_weight`, and `mask_token_id` are not provided.
49 >>> np.random.seed(42)
50 >>> perplexity = keras_nlp.metrics.Perplexity(name="perplexity")
51 >>> target = np.random.randint(10, size=[2, 5])
52 >>> logits = np.random.uniform(size=(2, 5, 10))
53 >>> perplexity.update_state(target, logits)
54 >>> perplexity.result()
55 <tf.Tensor: shape=(), dtype=float32, numpy=14.352535>
56
57 1.2. `sample_weight` specified (masking token with ID 0).
58 >>> np.random.seed(42)
59 >>> perplexity = keras_nlp.metrics.Perplexity(name="perplexity")
60 >>> target = np.random.randint(10, size=[2, 5])
61 >>> logits = np.random.uniform(size=(2, 5, 10))
62 >>> sample_weight = (target != 0).astype("float32")
63 >>> perplexity.update_state(target, logits, sample_weight)
64 >>> perplexity.result()
65 <tf.Tensor: shape=(), dtype=float32, numpy=14.352535>
66
67 2. Call perplexity directly.
68 >>> np.random.seed(42)
69 >>> perplexity = keras_nlp.metrics.Perplexity(name="perplexity")
70 >>> target = np.random.randint(10, size=[2, 5])
71 >>> logits = np.random.uniform(size=(2, 5, 10))
72 >>> perplexity(target, logits)
73 <tf.Tensor: shape=(), dtype=float32, numpy=14.352535>
74
75 3. Provide the padding token ID and let the class compute the mask on its
76 own.
77 >>> np.random.seed(42)
78 >>> perplexity = keras_nlp.metrics.Perplexity(mask_token_id=0)
79 >>> target = np.random.randint(10, size=[2, 5])
80 >>> logits = np.random.uniform(size=(2, 5, 10))
81 >>> perplexity(target, logits)
82 <tf.Tensor: shape=(), dtype=float32, numpy=14.352535>
83 """
84
85 def __init__(
86 self,
87 from_logits=False,
88 mask_token_id=None,
89 dtype="float32",
90 name="perplexity",
91 **kwargs,
92 ):
93 if not is_floating_dtype(dtype):
94 raise ValueError(
95 "`dtype` must be a floating point type. "
96 f"Received: dtype={dtype}"
97 )
98
99 super().__init__(name=name, dtype=dtype, **kwargs)
100
101 self._crossentropy = keras.losses.SparseCategoricalCrossentropy(
102 from_logits=from_logits, reduction="sum"
103 )
104
105 self.from_logits = from_logits
106 self.mask_token_id = mask_token_id
107
108 self._aggregate_crossentropy = self.add_weight(
109 shape=(),
110 initializer="zeros",
111 dtype=self.dtype,
112 name="aggregate_crossentropy",
113 )
114 self._number_of_samples = self.add_weight(
115 shape=(),
116 initializer="zeros",
117 dtype=self.dtype,
118 name="number_of_samples",
119 )
120
121 def update_state(self, y_true, y_pred, sample_weight=None):
122 # y_true shape: (batch_size, seq_len)
123 # y_pred shape: (batch_size, seq_len, vocab_size)
124 y_true = ops.cast(y_true, self.dtype)
125 y_pred = ops.cast(y_pred, self.dtype)
126
127 if sample_weight is not None:
128 sample_weight = ops.cast(sample_weight, self.dtype)
129
130 batch_size = ops.cast(ops.shape(y_true)[0], self.dtype)
131
132 if self.mask_token_id is not None:
133 mask = ops.cast(
134 ops.logical_not(ops.equal(y_true, self.mask_token_id)),
135 self.dtype,
136 )
137 if sample_weight is None:
138 sample_weight = mask
139 else:
140 sample_weight = ops.multiply(mask, sample_weight)
141
142 # Calculate the Cross Entropy Loss.
143 crossentropy_value = ops.cast(
144 self._crossentropy(y_true, y_pred, sample_weight=sample_weight),
145 self.dtype,
146 ) # scalar
147
148 # Divide the loss by the number of non-masked tokens
149 if sample_weight is not None:
150 crossentropy_value = crossentropy_value / ops.sum(
151 sample_weight
152 ) # scalar
153 else:
154 crossentropy_value = crossentropy_value / (
155 ops.cast(ops.shape(y_true)[0], self.dtype)
156 * ops.cast(ops.shape(y_true)[1], self.dtype)
157 ) # scalar
158
159 self._aggregate_crossentropy.assign_add(batch_size * crossentropy_value)
160 self._number_of_samples.assign_add(batch_size)
161
162 def result(self):
163 if self._number_of_samples == 0:
164 return 0.0
165 perplexity_score = ops.exp(
166 self._aggregate_crossentropy / self._number_of_samples
167 )
168 return perplexity_score
169
170 def reset_state(self):
171 self._aggregate_crossentropy.assign(0.0)
172 self._number_of_samples.assign(0.0)
173
174 def get_config(self):
175 config = super().get_config()
176 config.update(
177 {
178 "from_logits": self.from_logits,
179 "mask_token_id": self.mask_token_id,
180 }
181 )
182 return config
183
[end of keras_nlp/metrics/perplexity.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/keras_nlp/metrics/perplexity.py b/keras_nlp/metrics/perplexity.py
--- a/keras_nlp/metrics/perplexity.py
+++ b/keras_nlp/metrics/perplexity.py
@@ -160,10 +160,10 @@
self._number_of_samples.assign_add(batch_size)
def result(self):
- if self._number_of_samples == 0:
- return 0.0
- perplexity_score = ops.exp(
- self._aggregate_crossentropy / self._number_of_samples
+ perplexity_score = ops.where(
+ ops.equal(self._number_of_samples, 0),
+ 0,
+ ops.exp(self._aggregate_crossentropy / self._number_of_samples),
)
return perplexity_score
| {"golden_diff": "diff --git a/keras_nlp/metrics/perplexity.py b/keras_nlp/metrics/perplexity.py\n--- a/keras_nlp/metrics/perplexity.py\n+++ b/keras_nlp/metrics/perplexity.py\n@@ -160,10 +160,10 @@\n self._number_of_samples.assign_add(batch_size)\n \n def result(self):\n- if self._number_of_samples == 0:\n- return 0.0\n- perplexity_score = ops.exp(\n- self._aggregate_crossentropy / self._number_of_samples\n+ perplexity_score = ops.where(\n+ ops.equal(self._number_of_samples, 0),\n+ 0,\n+ ops.exp(self._aggregate_crossentropy / self._number_of_samples),\n )\n return perplexity_score\n", "issue": "AutoGraph error upon model.fit() in GPT \n**Describe the bug**\r\n\r\n<details><summary>StackTrace</summary>\r\n<p>\r\n\r\nOperatorNotAllowedInGraphError Traceback (most recent call last)\r\n[<ipython-input-10-6ef525f3a2b6>](https://localhost:8080/#) in <cell line: 1>()\r\n----> 1 model.fit(train_ds, validation_data=val_ds, verbose=2, epochs=EPOCHS)\r\n\r\n1 frames\r\n[/usr/local/lib/python3.10/dist-packages/keras_core/src/utils/traceback_utils.py](https://localhost:8080/#) in error_handler(*args, **kwargs)\r\n 121 # To get the full stack trace, call:\r\n 122 # `keras_core.config.disable_traceback_filtering()`\r\n--> 123 raise e.with_traceback(filtered_tb) from None\r\n 124 finally:\r\n 125 del filtered_tb\r\n\r\n[/usr/local/lib/python3.10/dist-packages/keras_nlp/src/metrics/perplexity.py](https://localhost:8080/#) in result(self)\r\n 161 \r\n 162 def result(self):\r\n--> 163 if self._number_of_samples == 0:\r\n 164 return 0.0\r\n 165 perplexity_score = ops.exp(\r\n\r\nOperatorNotAllowedInGraphError: Using a symbolic `tf.Tensor` as a Python `bool` is not allowed: AutoGraph is disabled in this function. Try decorating it directly with @tf.function.\r\n\r\n</p>\r\n</details> \r\n\r\n**To Reproduce**\r\n[Colab Notebook](https://colab.research.google.com/drive/1779-TFbLUbVapOl8BQS1aLJF2LlLT2ZS?usp=sharing)\r\n\r\n\r\n\r\n**Would you like to help us fix it?**\r\nYes\n", "before_files": [{"content": "# Copyright 2023 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Perplexity metric.\"\"\"\n\nfrom keras_nlp.api_export import keras_nlp_export\nfrom keras_nlp.backend import keras\nfrom keras_nlp.backend import ops\nfrom keras_nlp.utils.tensor_utils import is_floating_dtype\n\n\n@keras_nlp_export(\"keras_nlp.metrics.Perplexity\")\nclass Perplexity(keras.metrics.Metric):\n \"\"\"Perplexity metric.\n\n This class implements the perplexity metric. In short, this class calculates\n the cross entropy loss and takes its exponent.\n Note: This implementation is not suitable for fixed-size windows.\n\n Args:\n from_logits: bool. If True, `y_pred` (input to `update_state()`) should\n be the logits as returned by the model. Otherwise, `y_pred` is a\n tensor of probabilities.\n mask_token_id: int. ID of the token to be masked. If provided, the mask\n is computed for this class. 
Note that if this field is provided, and\n if the `sample_weight` field in `update_state()` is also provided,\n we will compute the final `sample_weight` as the element-wise\n product of the mask and the `sample_weight`.\n dtype: string or tf.dtypes.Dtype. Precision of metric computation. If\n not specified, it defaults to `\"float32\"`.\n name: string. Name of the metric instance.\n **kwargs: Other keyword arguments.\n\n Examples:\n\n 1. Calculate perplexity by calling update_state() and result().\n 1.1. `sample_weight`, and `mask_token_id` are not provided.\n >>> np.random.seed(42)\n >>> perplexity = keras_nlp.metrics.Perplexity(name=\"perplexity\")\n >>> target = np.random.randint(10, size=[2, 5])\n >>> logits = np.random.uniform(size=(2, 5, 10))\n >>> perplexity.update_state(target, logits)\n >>> perplexity.result()\n <tf.Tensor: shape=(), dtype=float32, numpy=14.352535>\n\n 1.2. `sample_weight` specified (masking token with ID 0).\n >>> np.random.seed(42)\n >>> perplexity = keras_nlp.metrics.Perplexity(name=\"perplexity\")\n >>> target = np.random.randint(10, size=[2, 5])\n >>> logits = np.random.uniform(size=(2, 5, 10))\n >>> sample_weight = (target != 0).astype(\"float32\")\n >>> perplexity.update_state(target, logits, sample_weight)\n >>> perplexity.result()\n <tf.Tensor: shape=(), dtype=float32, numpy=14.352535>\n\n 2. Call perplexity directly.\n >>> np.random.seed(42)\n >>> perplexity = keras_nlp.metrics.Perplexity(name=\"perplexity\")\n >>> target = np.random.randint(10, size=[2, 5])\n >>> logits = np.random.uniform(size=(2, 5, 10))\n >>> perplexity(target, logits)\n <tf.Tensor: shape=(), dtype=float32, numpy=14.352535>\n\n 3. Provide the padding token ID and let the class compute the mask on its\n own.\n >>> np.random.seed(42)\n >>> perplexity = keras_nlp.metrics.Perplexity(mask_token_id=0)\n >>> target = np.random.randint(10, size=[2, 5])\n >>> logits = np.random.uniform(size=(2, 5, 10))\n >>> perplexity(target, logits)\n <tf.Tensor: shape=(), dtype=float32, numpy=14.352535>\n \"\"\"\n\n def __init__(\n self,\n from_logits=False,\n mask_token_id=None,\n dtype=\"float32\",\n name=\"perplexity\",\n **kwargs,\n ):\n if not is_floating_dtype(dtype):\n raise ValueError(\n \"`dtype` must be a floating point type. 
\"\n f\"Received: dtype={dtype}\"\n )\n\n super().__init__(name=name, dtype=dtype, **kwargs)\n\n self._crossentropy = keras.losses.SparseCategoricalCrossentropy(\n from_logits=from_logits, reduction=\"sum\"\n )\n\n self.from_logits = from_logits\n self.mask_token_id = mask_token_id\n\n self._aggregate_crossentropy = self.add_weight(\n shape=(),\n initializer=\"zeros\",\n dtype=self.dtype,\n name=\"aggregate_crossentropy\",\n )\n self._number_of_samples = self.add_weight(\n shape=(),\n initializer=\"zeros\",\n dtype=self.dtype,\n name=\"number_of_samples\",\n )\n\n def update_state(self, y_true, y_pred, sample_weight=None):\n # y_true shape: (batch_size, seq_len)\n # y_pred shape: (batch_size, seq_len, vocab_size)\n y_true = ops.cast(y_true, self.dtype)\n y_pred = ops.cast(y_pred, self.dtype)\n\n if sample_weight is not None:\n sample_weight = ops.cast(sample_weight, self.dtype)\n\n batch_size = ops.cast(ops.shape(y_true)[0], self.dtype)\n\n if self.mask_token_id is not None:\n mask = ops.cast(\n ops.logical_not(ops.equal(y_true, self.mask_token_id)),\n self.dtype,\n )\n if sample_weight is None:\n sample_weight = mask\n else:\n sample_weight = ops.multiply(mask, sample_weight)\n\n # Calculate the Cross Entropy Loss.\n crossentropy_value = ops.cast(\n self._crossentropy(y_true, y_pred, sample_weight=sample_weight),\n self.dtype,\n ) # scalar\n\n # Divide the loss by the number of non-masked tokens\n if sample_weight is not None:\n crossentropy_value = crossentropy_value / ops.sum(\n sample_weight\n ) # scalar\n else:\n crossentropy_value = crossentropy_value / (\n ops.cast(ops.shape(y_true)[0], self.dtype)\n * ops.cast(ops.shape(y_true)[1], self.dtype)\n ) # scalar\n\n self._aggregate_crossentropy.assign_add(batch_size * crossentropy_value)\n self._number_of_samples.assign_add(batch_size)\n\n def result(self):\n if self._number_of_samples == 0:\n return 0.0\n perplexity_score = ops.exp(\n self._aggregate_crossentropy / self._number_of_samples\n )\n return perplexity_score\n\n def reset_state(self):\n self._aggregate_crossentropy.assign(0.0)\n self._number_of_samples.assign(0.0)\n\n def get_config(self):\n config = super().get_config()\n config.update(\n {\n \"from_logits\": self.from_logits,\n \"mask_token_id\": self.mask_token_id,\n }\n )\n return config\n", "path": "keras_nlp/metrics/perplexity.py"}]} | 3,048 | 181 |
gh_patches_debug_958 | rasdani/github-patches | git_diff | nvaccess__nvda-10921 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
VS Code: Reduce the number of times one has to use NVDA+Space to switch modes.
I just filed [this issue](https://github.com/microsoft/vscode/issues/93087) against VS Code where I suggest to use targeted role="document" in those places that produce HTML output for consumption, to make NVDA switch in and out of browse mode in a smart, automated, manner, reducing the number of times one has to use NVDA+Space to toggle modes. Examples I found while using the 1.44 VS Code Insider builds were:
* The Welcome page
* The details page for an extension
* The ReadMe file that may be displayed after an extension has been installed.
@leonardder suggested that, once this lands in stable, a modification might be needed for the VS Code app module. So filing this issue here.
</issue>
<code>
[start of source/appModules/code.py]
1 #appModules/code.py
2 #A part of NonVisual Desktop Access (NVDA)
3 #Copyright (C) 2019 NV Access Limited, Babbage B.V.
4 #This file is covered by the GNU General Public License.
5 #See the file COPYING for more details.
6
7 import appModuleHandler
8
9 class AppModule(appModuleHandler.AppModule):
10 disableBrowseModeByDefault = True
11
[end of source/appModules/code.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/source/appModules/code.py b/source/appModules/code.py
deleted file mode 100644
--- a/source/appModules/code.py
+++ /dev/null
@@ -1,10 +0,0 @@
-#appModules/code.py
-#A part of NonVisual Desktop Access (NVDA)
-#Copyright (C) 2019 NV Access Limited, Babbage B.V.
-#This file is covered by the GNU General Public License.
-#See the file COPYING for more details.
-
-import appModuleHandler
-
-class AppModule(appModuleHandler.AppModule):
- disableBrowseModeByDefault = True
| {"golden_diff": "diff --git a/source/appModules/code.py b/source/appModules/code.py\ndeleted file mode 100644\n--- a/source/appModules/code.py\n+++ /dev/null\n@@ -1,10 +0,0 @@\n-#appModules/code.py\n-#A part of NonVisual Desktop Access (NVDA)\n-#Copyright (C) 2019 NV Access Limited, Babbage B.V.\n-#This file is covered by the GNU General Public License.\n-#See the file COPYING for more details.\n-\n-import appModuleHandler\n-\n-class AppModule(appModuleHandler.AppModule):\n-\tdisableBrowseModeByDefault = True\n", "issue": "VS Code: Reduce the number of times one has to use NVDA+Space to switch modes.\nI just filed [this issue](https://github.com/microsoft/vscode/issues/93087) against VS Code where I suggest to use targeted role=\"document\" in those places that produce HTML output for consumption, to make NVDA switch in and out of browse mode in a smart, automated, manner, reducing the number of times one has to use NVDA+Space to toggle modes. Examples I found while using the 1.44 VS Code Insider builds were:\r\n\r\n* The Welcome page\r\n* The details page for an extension\r\n* The ReadMe file that may be displayed after an extension has been installed.\r\n\r\n@leonardder suggested that, once this lands in stable, a modification might be needed for the VS Code app module. So filing this issue here.\n", "before_files": [{"content": "#appModules/code.py\n#A part of NonVisual Desktop Access (NVDA)\n#Copyright (C) 2019 NV Access Limited, Babbage B.V.\n#This file is covered by the GNU General Public License.\n#See the file COPYING for more details.\n\nimport appModuleHandler\n\nclass AppModule(appModuleHandler.AppModule):\n\tdisableBrowseModeByDefault = True\n", "path": "source/appModules/code.py"}]} | 811 | 139 |
gh_patches_debug_39654 | rasdani/github-patches | git_diff | ansible-collections__community.general-4794 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
community.general.sudoers should do a syntax check
### Summary
The `sudoers` module currently allows writing broken `sudoers` files, for example when `commands` is not an absolute path. This kind of user error can easily be detected by running `visudo -c -f ...` on the new file. I think it makes sense for the `sudoers` module to run this check.
Some errors, such as duplicate aliases, can only be detected in the context of the other `sudoers` files. According to [this post](https://serverfault.com/a/901906) a more comprehensive check can be done by running a full configuration check `visudo -c` after installing the new file. However, I'm not sure if this is worth implementing. The `sudoers` module does not currently seem to be capable of producing syntax error that can not be detected by a simple `visudo -c -f ...`.
### Issue Type
Feature Idea
### Component Name
sudoers
### Additional Information
Slightly modified example task from the documentation:
```yaml
- name: >-
Allow the alice user to run sudo /bin/systemctl restart my-service or
sudo /bin/systemctl reload my-service, but a password is required
community.general.sudoers:
name: alice-service
user: alice
commands:
- systemctl restart my-service
```
`sudo` commands complain about the syntax error:
```
visudo -c -f /etc/sudoers.d/alice-service
alice-service:1:21: syntax error
alice ALL=NOPASSWD: systemctl restart my-service
^~~~~~~~~
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
</issue>
<code>
[start of plugins/modules/system/sudoers.py]
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4
5 # Copyright: (c) 2019, Jon Ellis (@JonEllis) <[email protected]>
6 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
7
8 from __future__ import absolute_import, division, print_function
9 __metaclass__ = type
10
11
12 DOCUMENTATION = '''
13 ---
14 module: sudoers
15 short_description: Manage sudoers files
16 version_added: "4.3.0"
17 description:
18 - This module allows for the manipulation of sudoers files.
19 author:
20 - "Jon Ellis (@JonEllis) <[email protected]>"
21 options:
22 commands:
23 description:
24 - The commands allowed by the sudoers rule.
25 - Multiple can be added by passing a list of commands.
26 - Use C(ALL) for all commands.
27 type: list
28 elements: str
29 group:
30 description:
31 - The name of the group for the sudoers rule.
32 - This option cannot be used in conjunction with I(user).
33 type: str
34 name:
35 required: true
36 description:
37 - The name of the sudoers rule.
38 - This will be used for the filename for the sudoers file managed by this rule.
39 type: str
40 nopassword:
41 description:
42 - Whether a password will be required to run the sudo'd command.
43 default: true
44 type: bool
45 runas:
46 description:
47 - Specify the target user the command(s) will run as.
48 type: str
49 version_added: 4.7.0
50 sudoers_path:
51 description:
52 - The path which sudoers config files will be managed in.
53 default: /etc/sudoers.d
54 type: str
55 state:
56 default: "present"
57 choices:
58 - present
59 - absent
60 description:
61 - Whether the rule should exist or not.
62 type: str
63 user:
64 description:
65 - The name of the user for the sudoers rule.
66 - This option cannot be used in conjunction with I(group).
67 type: str
68 '''
69
70 EXAMPLES = '''
71 - name: Allow the backup user to sudo /usr/local/bin/backup
72 community.general.sudoers:
73 name: allow-backup
74 state: present
75 user: backup
76 commands: /usr/local/bin/backup
77
78 - name: Allow the bob user to run any commands as alice with sudo -u alice
79 community.general.sudoers:
80 name: bob-do-as-alice
81 state: present
82 user: bob
83 runas: alice
84 commands: ALL
85
86 - name: >-
87 Allow the monitoring group to run sudo /usr/local/bin/gather-app-metrics
88 without requiring a password
89 community.general.sudoers:
90 name: monitor-app
91 group: monitoring
92 commands: /usr/local/bin/gather-app-metrics
93
94 - name: >-
95 Allow the alice user to run sudo /bin/systemctl restart my-service or
96 sudo /bin/systemctl reload my-service, but a password is required
97 community.general.sudoers:
98 name: alice-service
99 user: alice
100 commands:
101 - /bin/systemctl restart my-service
102 - /bin/systemctl reload my-service
103 nopassword: false
104
105 - name: Revoke the previous sudo grants given to the alice user
106 community.general.sudoers:
107 name: alice-service
108 state: absent
109 '''
110
111 import os
112 from ansible.module_utils.basic import AnsibleModule
113 from ansible.module_utils.common.text.converters import to_native
114
115
116 class Sudoers(object):
117
118 FILE_MODE = 0o440
119
120 def __init__(self, module):
121 self.check_mode = module.check_mode
122 self.name = module.params['name']
123 self.user = module.params['user']
124 self.group = module.params['group']
125 self.state = module.params['state']
126 self.nopassword = module.params['nopassword']
127 self.runas = module.params['runas']
128 self.sudoers_path = module.params['sudoers_path']
129 self.file = os.path.join(self.sudoers_path, self.name)
130 self.commands = module.params['commands']
131
132 def write(self):
133 if self.check_mode:
134 return
135
136 with open(self.file, 'w') as f:
137 f.write(self.content())
138
139 os.chmod(self.file, self.FILE_MODE)
140
141 def delete(self):
142 if self.check_mode:
143 return
144
145 os.remove(self.file)
146
147 def exists(self):
148 return os.path.exists(self.file)
149
150 def matches(self):
151 with open(self.file, 'r') as f:
152 content_matches = f.read() == self.content()
153
154 current_mode = os.stat(self.file).st_mode & 0o777
155 mode_matches = current_mode == self.FILE_MODE
156
157 return content_matches and mode_matches
158
159 def content(self):
160 if self.user:
161 owner = self.user
162 elif self.group:
163 owner = '%{group}'.format(group=self.group)
164
165 commands_str = ', '.join(self.commands)
166 nopasswd_str = 'NOPASSWD:' if self.nopassword else ''
167 runas_str = '({runas})'.format(runas=self.runas) if self.runas is not None else ''
168 return "{owner} ALL={runas}{nopasswd} {commands}\n".format(owner=owner, runas=runas_str, nopasswd=nopasswd_str, commands=commands_str)
169
170 def run(self):
171 if self.state == 'absent':
172 if self.exists():
173 self.delete()
174 return True
175 else:
176 return False
177
178 if self.exists() and self.matches():
179 return False
180
181 self.write()
182 return True
183
184
185 def main():
186 argument_spec = {
187 'commands': {
188 'type': 'list',
189 'elements': 'str',
190 },
191 'group': {},
192 'name': {
193 'required': True,
194 },
195 'nopassword': {
196 'type': 'bool',
197 'default': True,
198 },
199 'runas': {
200 'type': 'str',
201 'default': None,
202 },
203 'sudoers_path': {
204 'type': 'str',
205 'default': '/etc/sudoers.d',
206 },
207 'state': {
208 'default': 'present',
209 'choices': ['present', 'absent'],
210 },
211 'user': {},
212 }
213
214 module = AnsibleModule(
215 argument_spec=argument_spec,
216 mutually_exclusive=[['user', 'group']],
217 supports_check_mode=True,
218 required_if=[('state', 'present', ['commands'])],
219 )
220
221 sudoers = Sudoers(module)
222
223 try:
224 changed = sudoers.run()
225 module.exit_json(changed=changed)
226 except Exception as e:
227 module.fail_json(msg=to_native(e))
228
229
230 if __name__ == '__main__':
231 main()
232
[end of plugins/modules/system/sudoers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugins/modules/system/sudoers.py b/plugins/modules/system/sudoers.py
--- a/plugins/modules/system/sudoers.py
+++ b/plugins/modules/system/sudoers.py
@@ -65,6 +65,15 @@
- The name of the user for the sudoers rule.
- This option cannot be used in conjunction with I(group).
type: str
+ validation:
+ description:
+ - If C(absent), the sudoers rule will be added without validation.
+ - If C(detect) and visudo is available, then the sudoers rule will be validated by visudo.
+ - If C(required), visudo must be available to validate the sudoers rule.
+ type: str
+ default: detect
+ choices: [ absent, detect, required ]
+ version_added: 5.2.0
'''
EXAMPLES = '''
@@ -118,6 +127,8 @@
FILE_MODE = 0o440
def __init__(self, module):
+ self.module = module
+
self.check_mode = module.check_mode
self.name = module.params['name']
self.user = module.params['user']
@@ -128,6 +139,7 @@
self.sudoers_path = module.params['sudoers_path']
self.file = os.path.join(self.sudoers_path, self.name)
self.commands = module.params['commands']
+ self.validation = module.params['validation']
def write(self):
if self.check_mode:
@@ -167,6 +179,20 @@
runas_str = '({runas})'.format(runas=self.runas) if self.runas is not None else ''
return "{owner} ALL={runas}{nopasswd} {commands}\n".format(owner=owner, runas=runas_str, nopasswd=nopasswd_str, commands=commands_str)
+ def validate(self):
+ if self.validation == 'absent':
+ return
+
+ visudo_path = self.module.get_bin_path('visudo', required=self.validation == 'required')
+ if visudo_path is None:
+ return
+
+ check_command = [visudo_path, '-c', '-f', '-']
+ rc, stdout, stderr = self.module.run_command(check_command, data=self.content())
+
+ if rc != 0:
+ raise Exception('Failed to validate sudoers rule:\n{stdout}'.format(stdout=stdout))
+
def run(self):
if self.state == 'absent':
if self.exists():
@@ -175,6 +201,8 @@
else:
return False
+ self.validate()
+
if self.exists() and self.matches():
return False
@@ -209,6 +237,10 @@
'choices': ['present', 'absent'],
},
'user': {},
+ 'validation': {
+ 'default': 'detect',
+ 'choices': ['absent', 'detect', 'required']
+ },
}
module = AnsibleModule(
| {"golden_diff": "diff --git a/plugins/modules/system/sudoers.py b/plugins/modules/system/sudoers.py\n--- a/plugins/modules/system/sudoers.py\n+++ b/plugins/modules/system/sudoers.py\n@@ -65,6 +65,15 @@\n - The name of the user for the sudoers rule.\n - This option cannot be used in conjunction with I(group).\n type: str\n+ validation:\n+ description:\n+ - If C(absent), the sudoers rule will be added without validation.\n+ - If C(detect) and visudo is available, then the sudoers rule will be validated by visudo.\n+ - If C(required), visudo must be available to validate the sudoers rule.\n+ type: str\n+ default: detect\n+ choices: [ absent, detect, required ]\n+ version_added: 5.2.0\n '''\n \n EXAMPLES = '''\n@@ -118,6 +127,8 @@\n FILE_MODE = 0o440\n \n def __init__(self, module):\n+ self.module = module\n+\n self.check_mode = module.check_mode\n self.name = module.params['name']\n self.user = module.params['user']\n@@ -128,6 +139,7 @@\n self.sudoers_path = module.params['sudoers_path']\n self.file = os.path.join(self.sudoers_path, self.name)\n self.commands = module.params['commands']\n+ self.validation = module.params['validation']\n \n def write(self):\n if self.check_mode:\n@@ -167,6 +179,20 @@\n runas_str = '({runas})'.format(runas=self.runas) if self.runas is not None else ''\n return \"{owner} ALL={runas}{nopasswd} {commands}\\n\".format(owner=owner, runas=runas_str, nopasswd=nopasswd_str, commands=commands_str)\n \n+ def validate(self):\n+ if self.validation == 'absent':\n+ return\n+\n+ visudo_path = self.module.get_bin_path('visudo', required=self.validation == 'required')\n+ if visudo_path is None:\n+ return\n+\n+ check_command = [visudo_path, '-c', '-f', '-']\n+ rc, stdout, stderr = self.module.run_command(check_command, data=self.content())\n+\n+ if rc != 0:\n+ raise Exception('Failed to validate sudoers rule:\\n{stdout}'.format(stdout=stdout))\n+\n def run(self):\n if self.state == 'absent':\n if self.exists():\n@@ -175,6 +201,8 @@\n else:\n return False\n \n+ self.validate()\n+\n if self.exists() and self.matches():\n return False\n \n@@ -209,6 +237,10 @@\n 'choices': ['present', 'absent'],\n },\n 'user': {},\n+ 'validation': {\n+ 'default': 'detect',\n+ 'choices': ['absent', 'detect', 'required']\n+ },\n }\n \n module = AnsibleModule(\n", "issue": "community.general.sudoers should do a syntax check\n### Summary\n\nThe `sudoers` module currently allows writing broken `sudoers` files, for example when `commands` is not an absolute path. This kind of user error can easily be detected by running `visudo -c -f ...` on the new file. I think it makes sense for the `sudoers` module to run this check.\r\n\r\nSome errors, such as duplicate aliases, can only be detected in the context of the other `sudoers` files. According to [this post](https://serverfault.com/a/901906) a more comprehensive check can be done by running a full configuration check `visudo -c` after installing the new file. However, I'm not sure if this is worth implementing. 
The `sudoers` module does not currently seem to be capable of producing syntax error that can not be detected by a simple `visudo -c -f ...`.\n\n### Issue Type\n\nFeature Idea\n\n### Component Name\n\nsudoers\n\n### Additional Information\n\nSlightly modified example task from the documentation:\r\n```yaml\r\n- name: >-\r\n Allow the alice user to run sudo /bin/systemctl restart my-service or\r\n sudo /bin/systemctl reload my-service, but a password is required\r\n community.general.sudoers:\r\n name: alice-service\r\n user: alice\r\n commands:\r\n - systemctl restart my-service\r\n```\r\n\r\n`sudo` commands complain about the syntax error:\r\n```\r\nvisudo -c -f /etc/sudoers.d/alice-service\r\nalice-service:1:21: syntax error\r\nalice ALL=NOPASSWD: systemctl restart my-service\r\n ^~~~~~~~~\r\n```\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\n# Copyright: (c) 2019, Jon Ellis (@JonEllis) <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: sudoers\nshort_description: Manage sudoers files\nversion_added: \"4.3.0\"\ndescription:\n - This module allows for the manipulation of sudoers files.\nauthor:\n - \"Jon Ellis (@JonEllis) <[email protected]>\"\noptions:\n commands:\n description:\n - The commands allowed by the sudoers rule.\n - Multiple can be added by passing a list of commands.\n - Use C(ALL) for all commands.\n type: list\n elements: str\n group:\n description:\n - The name of the group for the sudoers rule.\n - This option cannot be used in conjunction with I(user).\n type: str\n name:\n required: true\n description:\n - The name of the sudoers rule.\n - This will be used for the filename for the sudoers file managed by this rule.\n type: str\n nopassword:\n description:\n - Whether a password will be required to run the sudo'd command.\n default: true\n type: bool\n runas:\n description:\n - Specify the target user the command(s) will run as.\n type: str\n version_added: 4.7.0\n sudoers_path:\n description:\n - The path which sudoers config files will be managed in.\n default: /etc/sudoers.d\n type: str\n state:\n default: \"present\"\n choices:\n - present\n - absent\n description:\n - Whether the rule should exist or not.\n type: str\n user:\n description:\n - The name of the user for the sudoers rule.\n - This option cannot be used in conjunction with I(group).\n type: str\n'''\n\nEXAMPLES = '''\n- name: Allow the backup user to sudo /usr/local/bin/backup\n community.general.sudoers:\n name: allow-backup\n state: present\n user: backup\n commands: /usr/local/bin/backup\n\n- name: Allow the bob user to run any commands as alice with sudo -u alice\n community.general.sudoers:\n name: bob-do-as-alice\n state: present\n user: bob\n runas: alice\n commands: ALL\n\n- name: >-\n Allow the monitoring group to run sudo /usr/local/bin/gather-app-metrics\n without requiring a password\n community.general.sudoers:\n name: monitor-app\n group: monitoring\n commands: /usr/local/bin/gather-app-metrics\n\n- name: >-\n Allow the alice user to run sudo /bin/systemctl restart my-service or\n sudo /bin/systemctl reload my-service, but a password is required\n community.general.sudoers:\n name: alice-service\n user: alice\n commands:\n - /bin/systemctl restart my-service\n - /bin/systemctl reload 
my-service\n nopassword: false\n\n- name: Revoke the previous sudo grants given to the alice user\n community.general.sudoers:\n name: alice-service\n state: absent\n'''\n\nimport os\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.common.text.converters import to_native\n\n\nclass Sudoers(object):\n\n FILE_MODE = 0o440\n\n def __init__(self, module):\n self.check_mode = module.check_mode\n self.name = module.params['name']\n self.user = module.params['user']\n self.group = module.params['group']\n self.state = module.params['state']\n self.nopassword = module.params['nopassword']\n self.runas = module.params['runas']\n self.sudoers_path = module.params['sudoers_path']\n self.file = os.path.join(self.sudoers_path, self.name)\n self.commands = module.params['commands']\n\n def write(self):\n if self.check_mode:\n return\n\n with open(self.file, 'w') as f:\n f.write(self.content())\n\n os.chmod(self.file, self.FILE_MODE)\n\n def delete(self):\n if self.check_mode:\n return\n\n os.remove(self.file)\n\n def exists(self):\n return os.path.exists(self.file)\n\n def matches(self):\n with open(self.file, 'r') as f:\n content_matches = f.read() == self.content()\n\n current_mode = os.stat(self.file).st_mode & 0o777\n mode_matches = current_mode == self.FILE_MODE\n\n return content_matches and mode_matches\n\n def content(self):\n if self.user:\n owner = self.user\n elif self.group:\n owner = '%{group}'.format(group=self.group)\n\n commands_str = ', '.join(self.commands)\n nopasswd_str = 'NOPASSWD:' if self.nopassword else ''\n runas_str = '({runas})'.format(runas=self.runas) if self.runas is not None else ''\n return \"{owner} ALL={runas}{nopasswd} {commands}\\n\".format(owner=owner, runas=runas_str, nopasswd=nopasswd_str, commands=commands_str)\n\n def run(self):\n if self.state == 'absent':\n if self.exists():\n self.delete()\n return True\n else:\n return False\n\n if self.exists() and self.matches():\n return False\n\n self.write()\n return True\n\n\ndef main():\n argument_spec = {\n 'commands': {\n 'type': 'list',\n 'elements': 'str',\n },\n 'group': {},\n 'name': {\n 'required': True,\n },\n 'nopassword': {\n 'type': 'bool',\n 'default': True,\n },\n 'runas': {\n 'type': 'str',\n 'default': None,\n },\n 'sudoers_path': {\n 'type': 'str',\n 'default': '/etc/sudoers.d',\n },\n 'state': {\n 'default': 'present',\n 'choices': ['present', 'absent'],\n },\n 'user': {},\n }\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n mutually_exclusive=[['user', 'group']],\n supports_check_mode=True,\n required_if=[('state', 'present', ['commands'])],\n )\n\n sudoers = Sudoers(module)\n\n try:\n changed = sudoers.run()\n module.exit_json(changed=changed)\n except Exception as e:\n module.fail_json(msg=to_native(e))\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/system/sudoers.py"}]} | 3,030 | 687 |
gh_patches_debug_27836 | rasdani/github-patches | git_diff | encode__starlette-151 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wildcard domains and TrustedhostMiddleware
Support for wildcard domains
</issue>
<code>
[start of starlette/middleware/trustedhost.py]
1 from starlette.datastructures import Headers
2 from starlette.responses import PlainTextResponse
3 from starlette.types import ASGIApp, ASGIInstance, Scope
4 import typing
5
6
7 class TrustedHostMiddleware:
8 def __init__(
9 self, app: ASGIApp, allowed_hosts: typing.Sequence[str] = ["*"]
10 ) -> None:
11 self.app = app
12 self.allowed_hosts = allowed_hosts
13 self.allow_any = "*" in allowed_hosts
14
15 def __call__(self, scope: Scope) -> ASGIInstance:
16 if scope["type"] in ("http", "websocket") and not self.allow_any:
17 headers = Headers(scope=scope)
18 host = headers.get("host")
19 if host not in self.allowed_hosts:
20 return PlainTextResponse("Invalid host header", status_code=400)
21
22 return self.app(scope)
23
[end of starlette/middleware/trustedhost.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/starlette/middleware/trustedhost.py b/starlette/middleware/trustedhost.py
--- a/starlette/middleware/trustedhost.py
+++ b/starlette/middleware/trustedhost.py
@@ -4,10 +4,17 @@
import typing
+ENFORCE_DOMAIN_WILDCARD = "Domain wildcard patterns must be like '*.example.com'."
+
+
class TrustedHostMiddleware:
def __init__(
self, app: ASGIApp, allowed_hosts: typing.Sequence[str] = ["*"]
) -> None:
+ for pattern in allowed_hosts:
+ assert "*" not in pattern[1:], ENFORCE_DOMAIN_WILDCARD
+ if pattern.startswith("*") and pattern != "*":
+ assert pattern.startswith("*."), ENFORCE_DOMAIN_WILDCARD
self.app = app
self.allowed_hosts = allowed_hosts
self.allow_any = "*" in allowed_hosts
@@ -15,8 +22,15 @@
def __call__(self, scope: Scope) -> ASGIInstance:
if scope["type"] in ("http", "websocket") and not self.allow_any:
headers = Headers(scope=scope)
- host = headers.get("host")
- if host not in self.allowed_hosts:
+ host = headers.get("host", "").split(":")[0]
+ for pattern in self.allowed_hosts:
+ if (
+ host == pattern
+ or pattern.startswith("*")
+ and host.endswith(pattern[1:])
+ ):
+ break
+ else:
return PlainTextResponse("Invalid host header", status_code=400)
return self.app(scope)
| {"golden_diff": "diff --git a/starlette/middleware/trustedhost.py b/starlette/middleware/trustedhost.py\n--- a/starlette/middleware/trustedhost.py\n+++ b/starlette/middleware/trustedhost.py\n@@ -4,10 +4,17 @@\n import typing\n \n \n+ENFORCE_DOMAIN_WILDCARD = \"Domain wildcard patterns must be like '*.example.com'.\"\n+\n+\n class TrustedHostMiddleware:\n def __init__(\n self, app: ASGIApp, allowed_hosts: typing.Sequence[str] = [\"*\"]\n ) -> None:\n+ for pattern in allowed_hosts:\n+ assert \"*\" not in pattern[1:], ENFORCE_DOMAIN_WILDCARD\n+ if pattern.startswith(\"*\") and pattern != \"*\":\n+ assert pattern.startswith(\"*.\"), ENFORCE_DOMAIN_WILDCARD\n self.app = app\n self.allowed_hosts = allowed_hosts\n self.allow_any = \"*\" in allowed_hosts\n@@ -15,8 +22,15 @@\n def __call__(self, scope: Scope) -> ASGIInstance:\n if scope[\"type\"] in (\"http\", \"websocket\") and not self.allow_any:\n headers = Headers(scope=scope)\n- host = headers.get(\"host\")\n- if host not in self.allowed_hosts:\n+ host = headers.get(\"host\", \"\").split(\":\")[0]\n+ for pattern in self.allowed_hosts:\n+ if (\n+ host == pattern\n+ or pattern.startswith(\"*\")\n+ and host.endswith(pattern[1:])\n+ ):\n+ break\n+ else:\n return PlainTextResponse(\"Invalid host header\", status_code=400)\n \n return self.app(scope)\n", "issue": "Wildcard domains and TrustedhostMiddleware\nSupport for wildcard domains\n", "before_files": [{"content": "from starlette.datastructures import Headers\nfrom starlette.responses import PlainTextResponse\nfrom starlette.types import ASGIApp, ASGIInstance, Scope\nimport typing\n\n\nclass TrustedHostMiddleware:\n def __init__(\n self, app: ASGIApp, allowed_hosts: typing.Sequence[str] = [\"*\"]\n ) -> None:\n self.app = app\n self.allowed_hosts = allowed_hosts\n self.allow_any = \"*\" in allowed_hosts\n\n def __call__(self, scope: Scope) -> ASGIInstance:\n if scope[\"type\"] in (\"http\", \"websocket\") and not self.allow_any:\n headers = Headers(scope=scope)\n host = headers.get(\"host\")\n if host not in self.allowed_hosts:\n return PlainTextResponse(\"Invalid host header\", status_code=400)\n\n return self.app(scope)\n", "path": "starlette/middleware/trustedhost.py"}]} | 771 | 355 |
gh_patches_debug_30801 | rasdani/github-patches | git_diff | numba__numba-1719 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LinkedList jitclass example is broken
```
Internal error:
TypeError: Invalid store of %"deferred.4329823704.value" to {i8*, {i32, {%"deferred.4329823704.data", i8}}*} in <numba.datamodel.models.OptionalModel object at 0x106713278>
File "linkedlist.py", line 53
```
</issue>
<code>
[start of numba/targets/optional.py]
1 from __future__ import print_function, absolute_import, division
2
3 from numba import types, cgutils
4
5 from .imputils import lower_cast
6
7
8 def make_optional(valtype):
9 """
10 Return the Structure representation of a optional value
11 """
12 return cgutils.create_struct_proxy(types.Optional(valtype))
13
14
15 def always_return_true_impl(context, builder, sig, args):
16 return cgutils.true_bit
17
18
19 def always_return_false_impl(context, builder, sig, args):
20 return cgutils.false_bit
21
22
23 @lower_cast(types.Any, types.Optional)
24 def any_to_optional(context, builder, fromty, toty, val):
25 if fromty == types.none:
26 return context.make_optional_none(builder, toty.type)
27 else:
28 val = context.cast(builder, val, fromty, toty.type)
29 return context.make_optional_value(builder, toty.type, val)
30
31 @lower_cast(types.Optional, types.Any)
32 def optional_to_any(context, builder, fromty, toty, val):
33 optty = context.make_optional(fromty)
34 optval = optty(context, builder, value=val)
35 validbit = cgutils.as_bool_bit(builder, optval.valid)
36 with builder.if_then(builder.not_(validbit), likely=False):
37 msg = "expected %s, got None" % (fromty.type,)
38 context.call_conv.return_user_exc(builder, TypeError, (msg,))
39
40 return optval.data
41
[end of numba/targets/optional.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numba/targets/optional.py b/numba/targets/optional.py
--- a/numba/targets/optional.py
+++ b/numba/targets/optional.py
@@ -20,6 +20,39 @@
return cgutils.false_bit
+@lower_cast(types.Optional, types.Optional)
+def optional_to_optional(context, builder, fromty, toty, val):
+ """
+ The handling of optional->optional cast must be special cased for
+ correct propagation of None value. Given type T and U. casting of
+ T? to U? (? denotes optional) should always succeed. If the from-value
+ is None, the None value the casted value (U?) should be None; otherwise,
+ the from-value is casted to U. This is different from casting T? to U,
+ which requires the from-value must not be None.
+ """
+ optty = context.make_optional(fromty)
+ optval = optty(context, builder, value=val)
+ validbit = cgutils.as_bool_bit(builder, optval.valid)
+ # Create uninitialized optional value
+ outoptty = context.make_optional(toty)
+ outoptval = outoptty(context, builder)
+
+ with builder.if_else(validbit) as (is_valid, is_not_valid):
+ with is_valid:
+ # Cast internal value
+ outoptval.valid = cgutils.true_bit
+ outoptval.data = context.cast(builder, optval.data,
+ fromty.type, toty.type)
+
+ with is_not_valid:
+ # Store None to result
+ outoptval.valid = cgutils.false_bit
+ outoptval.data = cgutils.get_null_value(
+ outoptval.data.type)
+
+ return outoptval._getvalue()
+
+
@lower_cast(types.Any, types.Optional)
def any_to_optional(context, builder, fromty, toty, val):
if fromty == types.none:
@@ -28,6 +61,7 @@
val = context.cast(builder, val, fromty, toty.type)
return context.make_optional_value(builder, toty.type, val)
+
@lower_cast(types.Optional, types.Any)
def optional_to_any(context, builder, fromty, toty, val):
optty = context.make_optional(fromty)
| {"golden_diff": "diff --git a/numba/targets/optional.py b/numba/targets/optional.py\n--- a/numba/targets/optional.py\n+++ b/numba/targets/optional.py\n@@ -20,6 +20,39 @@\n return cgutils.false_bit\n \n \n+@lower_cast(types.Optional, types.Optional)\n+def optional_to_optional(context, builder, fromty, toty, val):\n+ \"\"\"\n+ The handling of optional->optional cast must be special cased for\n+ correct propagation of None value. Given type T and U. casting of\n+ T? to U? (? denotes optional) should always succeed. If the from-value\n+ is None, the None value the casted value (U?) should be None; otherwise,\n+ the from-value is casted to U. This is different from casting T? to U,\n+ which requires the from-value must not be None.\n+ \"\"\"\n+ optty = context.make_optional(fromty)\n+ optval = optty(context, builder, value=val)\n+ validbit = cgutils.as_bool_bit(builder, optval.valid)\n+ # Create uninitialized optional value\n+ outoptty = context.make_optional(toty)\n+ outoptval = outoptty(context, builder)\n+\n+ with builder.if_else(validbit) as (is_valid, is_not_valid):\n+ with is_valid:\n+ # Cast internal value\n+ outoptval.valid = cgutils.true_bit\n+ outoptval.data = context.cast(builder, optval.data,\n+ fromty.type, toty.type)\n+\n+ with is_not_valid:\n+ # Store None to result\n+ outoptval.valid = cgutils.false_bit\n+ outoptval.data = cgutils.get_null_value(\n+ outoptval.data.type)\n+\n+ return outoptval._getvalue()\n+\n+\n @lower_cast(types.Any, types.Optional)\n def any_to_optional(context, builder, fromty, toty, val):\n if fromty == types.none:\n@@ -28,6 +61,7 @@\n val = context.cast(builder, val, fromty, toty.type)\n return context.make_optional_value(builder, toty.type, val)\n \n+\n @lower_cast(types.Optional, types.Any)\n def optional_to_any(context, builder, fromty, toty, val):\n optty = context.make_optional(fromty)\n", "issue": "LinkedList jitclass example is broken\n```\nInternal error:\nTypeError: Invalid store of %\"deferred.4329823704.value\" to {i8*, {i32, {%\"deferred.4329823704.data\", i8}}*} in <numba.datamodel.models.OptionalModel object at 0x106713278>\nFile \"linkedlist.py\", line 53\n```\n\n", "before_files": [{"content": "from __future__ import print_function, absolute_import, division\n\nfrom numba import types, cgutils\n\nfrom .imputils import lower_cast\n\n\ndef make_optional(valtype):\n \"\"\"\n Return the Structure representation of a optional value\n \"\"\"\n return cgutils.create_struct_proxy(types.Optional(valtype))\n\n\ndef always_return_true_impl(context, builder, sig, args):\n return cgutils.true_bit\n\n\ndef always_return_false_impl(context, builder, sig, args):\n return cgutils.false_bit\n\n\n@lower_cast(types.Any, types.Optional)\ndef any_to_optional(context, builder, fromty, toty, val):\n if fromty == types.none:\n return context.make_optional_none(builder, toty.type)\n else:\n val = context.cast(builder, val, fromty, toty.type)\n return context.make_optional_value(builder, toty.type, val)\n\n@lower_cast(types.Optional, types.Any)\ndef optional_to_any(context, builder, fromty, toty, val):\n optty = context.make_optional(fromty)\n optval = optty(context, builder, value=val)\n validbit = cgutils.as_bool_bit(builder, optval.valid)\n with builder.if_then(builder.not_(validbit), likely=False):\n msg = \"expected %s, got None\" % (fromty.type,)\n context.call_conv.return_user_exc(builder, TypeError, (msg,))\n\n return optval.data\n", "path": "numba/targets/optional.py"}]} | 1,021 | 525 |
gh_patches_debug_3087 | rasdani/github-patches | git_diff | svthalia__concrexit-3485 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Increase profile retention period
<!-- Please add the appropriate label for what change should be made -->
### What?
Currently, Profiles are dataminimised 1 month after the member's last membership ends.
We should increase that period to 3 months. I have discussed this with @JeeVee11.
### Why?
It happens quite often that people forget to renew their membership, and decide to do it later than the first of october. In those cases, currently, their profile will already have been wiped. To make it easier for those people to still renew, we should extend the retention period a bit longer.
### How?
Change a constant somewhere in `members/services.py`, and update the privacy policy.
</issue>
<code>
[start of website/members/services.py]
1 """Services defined in the members package."""
2 from collections.abc import Callable
3 from datetime import date
4 from typing import Any
5
6 from django.conf import settings
7 from django.db.models import Count, Q
8 from django.utils import timezone
9
10 from members import emails
11 from members.models import Member, Membership
12 from utils.snippets import datetime_to_lectureyear
13
14
15 def _member_group_memberships(
16 member: Member, condition: Callable[[Membership], bool]
17 ) -> dict[str, dict[str, Any]]:
18 """Determine the group membership of a user based on a condition.
19
20 :return: Object with group memberships
21 """
22 memberships = member.membergroupmembership_set.all()
23 data = {}
24
25 for membership in memberships:
26 if not condition(membership):
27 continue
28 period = {
29 "since": membership.since,
30 "until": membership.until,
31 "chair": membership.chair,
32 }
33
34 if hasattr(membership.group, "board"):
35 period["role"] = membership.role
36
37 if membership.until is None and hasattr(membership.group, "board"):
38 period["until"] = membership.group.board.until
39
40 name = membership.group.name
41 if data.get(name):
42 data[name]["periods"].append(period)
43 if data[name]["earliest"] > period["since"]:
44 data[name]["earliest"] = period["since"]
45 if period["until"] is None or (
46 data[name]["latest"] is not None
47 and data[name]["latest"] < period["until"]
48 ):
49 data[name]["latest"] = period["until"]
50 data[name]["periods"].sort(key=lambda x: x["since"])
51 else:
52 data[name] = {
53 "pk": membership.group.pk,
54 "active": membership.group.active,
55 "name": name,
56 "periods": [period],
57 "url": settings.BASE_URL + membership.group.get_absolute_url(),
58 "earliest": period["since"],
59 "latest": period["until"],
60 }
61 return data
62
63
64 def member_achievements(member) -> list:
65 """Derive a list of achievements of a member.
66
67 Committee and board memberships + mentorships
68 """
69 achievements = _member_group_memberships(
70 member,
71 lambda membership: (
72 hasattr(membership.group, "board") or hasattr(membership.group, "committee")
73 ),
74 )
75
76 mentor_years = member.mentorship_set.all()
77 for mentor_year in mentor_years:
78 name = f"Mentor in {mentor_year.year}"
79 # Ensure mentorships appear last but are sorted
80 earliest = date.today()
81 earliest = earliest.replace(year=earliest.year + mentor_year.year)
82 # Making sure it does not crash in leap years
83 if earliest.month == 2 and earliest.day == 29:
84 earliest = earliest.replace(day=28)
85 if not achievements.get(name):
86 achievements[name] = {
87 "name": name,
88 "earliest": earliest,
89 }
90 return sorted(achievements.values(), key=lambda x: x["earliest"])
91
92
93 def member_societies(member) -> list:
94 """Derive a list of societies a member was part of."""
95 societies = _member_group_memberships(
96 member, lambda membership: (hasattr(membership.group, "society"))
97 )
98 return sorted(societies.values(), key=lambda x: x["earliest"])
99
100
101 def gen_stats_member_type() -> dict[str, list]:
102 """Generate statistics about membership types."""
103 data = {
104 "labels": [],
105 "datasets": [
106 {"data": []},
107 ],
108 }
109
110 for key, display in Membership.MEMBERSHIP_TYPES:
111 data["labels"].append(str(display))
112 data["datasets"][0]["data"].append(
113 Membership.objects.filter(since__lte=date.today())
114 .filter(Q(until__isnull=True) | Q(until__gt=date.today()))
115 .filter(type=key)
116 .count()
117 )
118
119 return data
120
121
122 def gen_stats_year() -> dict[str, list]:
123 """Generate statistics on how many members (and other membership types) there were in each cohort."""
124 years = range(2015, datetime_to_lectureyear(date.today()))
125
126 data = {
127 "labels": list(years),
128 "datasets": [
129 {"label": str(display), "data": []}
130 for _, display in Membership.MEMBERSHIP_TYPES
131 ],
132 }
133
134 for index, (key, _) in enumerate(Membership.MEMBERSHIP_TYPES):
135 for year in years:
136 data["datasets"][index]["data"].append(
137 Membership.objects.filter(since__lte=date(year=year, month=9, day=1))
138 .filter(
139 Q(until__isnull=True) | Q(until__gt=date(year=year, month=9, day=1))
140 )
141 .filter(type=key)
142 .count()
143 )
144
145 return data
146
147
148 def gen_stats_active_members() -> dict[str, list]:
149 """Generate statistics about active members."""
150 return {
151 "labels": ["Active Members", "Non-active Members"],
152 "datasets": [
153 {
154 "data": [
155 Member.active_members.count(),
156 Member.current_members.count() - Member.active_members.count(),
157 ]
158 }
159 ],
160 }
161
162
163 def verify_email_change(change_request) -> None:
164 """Mark the email change request as verified.
165
166 :param change_request: the email change request
167 """
168 change_request.verified = True
169 change_request.save()
170
171 process_email_change(change_request)
172
173
174 def confirm_email_change(change_request) -> None:
175 """Mark the email change request as verified.
176
177 :param change_request: the email change request
178 """
179 change_request.confirmed = True
180 change_request.save()
181
182 process_email_change(change_request)
183
184
185 def process_email_change(change_request) -> None:
186 """Change the user's email address if the request was completed and send the completion email.
187
188 :param change_request: the email change request
189 """
190 if not change_request.completed:
191 return
192
193 member = change_request.member
194 member.email = change_request.email
195 member.save()
196
197 emails.send_email_change_completion_message(change_request)
198
199
200 def execute_data_minimisation(dry_run=False, members=None) -> list[Member]:
201 """Clean the profiles of members/users of whom the last membership ended at least 31 days ago.
202
203 :param dry_run: does not really remove data if True
204 :param members: queryset of members to process, optional
205 :return: list of processed members
206 """
207 if not members:
208 members = Member.objects
209 members = (
210 members.annotate(membership_count=Count("membership"))
211 .exclude(
212 (
213 Q(membership__until__isnull=True)
214 | Q(membership__until__gt=timezone.now().date())
215 )
216 & Q(membership_count__gt=0)
217 )
218 .distinct()
219 .prefetch_related("membership_set", "profile")
220 )
221 deletion_period = timezone.now().date() - timezone.timedelta(days=31)
222 processed_members = []
223 for member in members:
224 if (
225 member.latest_membership is None
226 or member.latest_membership.until <= deletion_period
227 ):
228 processed_members.append(member)
229 profile = member.profile
230 profile.student_number = None
231 profile.phone_number = None
232 profile.address_street = None
233 profile.address_street2 = None
234 profile.address_postal_code = None
235 profile.address_city = None
236 profile.address_country = None
237 profile.birthday = None
238 profile.emergency_contact_phone_number = None
239 profile.emergency_contact = None
240 profile.is_minimized = True
241 if not dry_run:
242 profile.save()
243
244 return processed_members
245
[end of website/members/services.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/members/services.py b/website/members/services.py
--- a/website/members/services.py
+++ b/website/members/services.py
@@ -218,7 +218,7 @@
.distinct()
.prefetch_related("membership_set", "profile")
)
- deletion_period = timezone.now().date() - timezone.timedelta(days=31)
+ deletion_period = timezone.now().date() - timezone.timedelta(days=90)
processed_members = []
for member in members:
if (
| {"golden_diff": "diff --git a/website/members/services.py b/website/members/services.py\n--- a/website/members/services.py\n+++ b/website/members/services.py\n@@ -218,7 +218,7 @@\n .distinct()\n .prefetch_related(\"membership_set\", \"profile\")\n )\n- deletion_period = timezone.now().date() - timezone.timedelta(days=31)\n+ deletion_period = timezone.now().date() - timezone.timedelta(days=90)\n processed_members = []\n for member in members:\n if (\n", "issue": "Increase profile retention period\n<!-- Please add the appropriate label for what change should be made -->\r\n\r\n### What?\r\nCurrently, Profiles are dataminimised 1 month after the member's last membership ends.\r\nWe should increase that period to 3 months. I have discussed this with @JeeVee11.\r\n\r\n### Why?\r\nIt happens quite often that people forget to renew their membership, and decide to do it later than the first of october. In those cases, currently, their profile will already have been wiped. To make it easier for those people to still renew, we should extend the retention period a bit longer.\r\n\r\n### How?\r\nChange a constant somewhere in `members/services.py`, and update the privacy policy. \r\n\n", "before_files": [{"content": "\"\"\"Services defined in the members package.\"\"\"\nfrom collections.abc import Callable\nfrom datetime import date\nfrom typing import Any\n\nfrom django.conf import settings\nfrom django.db.models import Count, Q\nfrom django.utils import timezone\n\nfrom members import emails\nfrom members.models import Member, Membership\nfrom utils.snippets import datetime_to_lectureyear\n\n\ndef _member_group_memberships(\n member: Member, condition: Callable[[Membership], bool]\n) -> dict[str, dict[str, Any]]:\n \"\"\"Determine the group membership of a user based on a condition.\n\n :return: Object with group memberships\n \"\"\"\n memberships = member.membergroupmembership_set.all()\n data = {}\n\n for membership in memberships:\n if not condition(membership):\n continue\n period = {\n \"since\": membership.since,\n \"until\": membership.until,\n \"chair\": membership.chair,\n }\n\n if hasattr(membership.group, \"board\"):\n period[\"role\"] = membership.role\n\n if membership.until is None and hasattr(membership.group, \"board\"):\n period[\"until\"] = membership.group.board.until\n\n name = membership.group.name\n if data.get(name):\n data[name][\"periods\"].append(period)\n if data[name][\"earliest\"] > period[\"since\"]:\n data[name][\"earliest\"] = period[\"since\"]\n if period[\"until\"] is None or (\n data[name][\"latest\"] is not None\n and data[name][\"latest\"] < period[\"until\"]\n ):\n data[name][\"latest\"] = period[\"until\"]\n data[name][\"periods\"].sort(key=lambda x: x[\"since\"])\n else:\n data[name] = {\n \"pk\": membership.group.pk,\n \"active\": membership.group.active,\n \"name\": name,\n \"periods\": [period],\n \"url\": settings.BASE_URL + membership.group.get_absolute_url(),\n \"earliest\": period[\"since\"],\n \"latest\": period[\"until\"],\n }\n return data\n\n\ndef member_achievements(member) -> list:\n \"\"\"Derive a list of achievements of a member.\n\n Committee and board memberships + mentorships\n \"\"\"\n achievements = _member_group_memberships(\n member,\n lambda membership: (\n hasattr(membership.group, \"board\") or hasattr(membership.group, \"committee\")\n ),\n )\n\n mentor_years = member.mentorship_set.all()\n for mentor_year in mentor_years:\n name = f\"Mentor in {mentor_year.year}\"\n # Ensure mentorships appear last but are sorted\n earliest = 
date.today()\n earliest = earliest.replace(year=earliest.year + mentor_year.year)\n # Making sure it does not crash in leap years\n if earliest.month == 2 and earliest.day == 29:\n earliest = earliest.replace(day=28)\n if not achievements.get(name):\n achievements[name] = {\n \"name\": name,\n \"earliest\": earliest,\n }\n return sorted(achievements.values(), key=lambda x: x[\"earliest\"])\n\n\ndef member_societies(member) -> list:\n \"\"\"Derive a list of societies a member was part of.\"\"\"\n societies = _member_group_memberships(\n member, lambda membership: (hasattr(membership.group, \"society\"))\n )\n return sorted(societies.values(), key=lambda x: x[\"earliest\"])\n\n\ndef gen_stats_member_type() -> dict[str, list]:\n \"\"\"Generate statistics about membership types.\"\"\"\n data = {\n \"labels\": [],\n \"datasets\": [\n {\"data\": []},\n ],\n }\n\n for key, display in Membership.MEMBERSHIP_TYPES:\n data[\"labels\"].append(str(display))\n data[\"datasets\"][0][\"data\"].append(\n Membership.objects.filter(since__lte=date.today())\n .filter(Q(until__isnull=True) | Q(until__gt=date.today()))\n .filter(type=key)\n .count()\n )\n\n return data\n\n\ndef gen_stats_year() -> dict[str, list]:\n \"\"\"Generate statistics on how many members (and other membership types) there were in each cohort.\"\"\"\n years = range(2015, datetime_to_lectureyear(date.today()))\n\n data = {\n \"labels\": list(years),\n \"datasets\": [\n {\"label\": str(display), \"data\": []}\n for _, display in Membership.MEMBERSHIP_TYPES\n ],\n }\n\n for index, (key, _) in enumerate(Membership.MEMBERSHIP_TYPES):\n for year in years:\n data[\"datasets\"][index][\"data\"].append(\n Membership.objects.filter(since__lte=date(year=year, month=9, day=1))\n .filter(\n Q(until__isnull=True) | Q(until__gt=date(year=year, month=9, day=1))\n )\n .filter(type=key)\n .count()\n )\n\n return data\n\n\ndef gen_stats_active_members() -> dict[str, list]:\n \"\"\"Generate statistics about active members.\"\"\"\n return {\n \"labels\": [\"Active Members\", \"Non-active Members\"],\n \"datasets\": [\n {\n \"data\": [\n Member.active_members.count(),\n Member.current_members.count() - Member.active_members.count(),\n ]\n }\n ],\n }\n\n\ndef verify_email_change(change_request) -> None:\n \"\"\"Mark the email change request as verified.\n\n :param change_request: the email change request\n \"\"\"\n change_request.verified = True\n change_request.save()\n\n process_email_change(change_request)\n\n\ndef confirm_email_change(change_request) -> None:\n \"\"\"Mark the email change request as verified.\n\n :param change_request: the email change request\n \"\"\"\n change_request.confirmed = True\n change_request.save()\n\n process_email_change(change_request)\n\n\ndef process_email_change(change_request) -> None:\n \"\"\"Change the user's email address if the request was completed and send the completion email.\n\n :param change_request: the email change request\n \"\"\"\n if not change_request.completed:\n return\n\n member = change_request.member\n member.email = change_request.email\n member.save()\n\n emails.send_email_change_completion_message(change_request)\n\n\ndef execute_data_minimisation(dry_run=False, members=None) -> list[Member]:\n \"\"\"Clean the profiles of members/users of whom the last membership ended at least 31 days ago.\n\n :param dry_run: does not really remove data if True\n :param members: queryset of members to process, optional\n :return: list of processed members\n \"\"\"\n if not members:\n members = Member.objects\n 
members = (\n members.annotate(membership_count=Count(\"membership\"))\n .exclude(\n (\n Q(membership__until__isnull=True)\n | Q(membership__until__gt=timezone.now().date())\n )\n & Q(membership_count__gt=0)\n )\n .distinct()\n .prefetch_related(\"membership_set\", \"profile\")\n )\n deletion_period = timezone.now().date() - timezone.timedelta(days=31)\n processed_members = []\n for member in members:\n if (\n member.latest_membership is None\n or member.latest_membership.until <= deletion_period\n ):\n processed_members.append(member)\n profile = member.profile\n profile.student_number = None\n profile.phone_number = None\n profile.address_street = None\n profile.address_street2 = None\n profile.address_postal_code = None\n profile.address_city = None\n profile.address_country = None\n profile.birthday = None\n profile.emergency_contact_phone_number = None\n profile.emergency_contact = None\n profile.is_minimized = True\n if not dry_run:\n profile.save()\n\n return processed_members\n", "path": "website/members/services.py"}]} | 2,983 | 118 |
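The whole of the fix in the record above is a retention constant: a profile may only be minimised once the member's last membership has been over for at least 90 days rather than 31. A self-contained sketch of that guard (`is_past_retention` is an illustrative helper name, not something from the patched `services.py`):

```python
from datetime import date, timedelta
from typing import Optional


def is_past_retention(last_membership_until: Optional[date], today: Optional[date] = None) -> bool:
    """True when a profile may be minimised: the member has no membership at all,
    or the last membership ended at least 90 days ago (the patch raised this from 31)."""
    today = today or date.today()
    deletion_period = today - timedelta(days=90)
    return last_membership_until is None or last_membership_until <= deletion_period
```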
gh_patches_debug_28871 | rasdani/github-patches | git_diff | dask__distributed-8528 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Proxy to worker dashboard not working
```
dask scheduler
dask worker <scheduler addr> --dashboard
```
Then navigate to `http://127.0.0.1:8787/info/main/workers.html` and click on dashboard. This should proxy to the worker dashboard but instead it triggers the below exception (with a 500 error code, of course)
```python-traceback
/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py:607: UserWarning: The Tornado web application does not have an 'identity_provider' defined in its settings. In future releases of jupyter_server, this will be a required key for all subclasses of `JupyterHandler`. For an example, see the jupyter_server source code for how to add an identity provider to the tornado settings: https://github.com/jupyter-server/jupyter_server/blob/v2.0.0/jupyter_server/serverapp.py#L242
if type(self.identity_provider) is IdentityProvider and mod_obj.__name__ != __name__:
2023-11-10 14:28:01,113 - tornado.application - ERROR - Uncaught exception GET /proxy/50121/192.168.2.53/status (127.0.0.1)
HTTPServerRequest(protocol='http', host='127.0.0.1:8787', method='GET', uri='/proxy/50121/192.168.2.53/status', version='HTTP/1.1', remote_ip='127.0.0.1')
Traceback (most recent call last):
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py", line 1765, in _execute
result = await result # type: ignore
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py", line 622, in prepare
_user = await _user
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/auth/identity.py", line 240, in _get_user
_cookie_user = self.get_user_cookie(handler)
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/auth/identity.py", line 398, in get_user_cookie
_user_cookie = handler.get_secure_cookie(
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py", line 836, in get_signed_cookie
self.require_setting("cookie_secret", "secure cookies")
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py", line 1669, in require_setting
raise Exception(
Exception: You must define the 'cookie_secret' setting in your application to use secure cookies
2023-11-10 14:28:01,121 - tornado.application - ERROR - Uncaught exception in write_error
Traceback (most recent call last):
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py", line 1765, in _execute
result = await result # type: ignore
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py", line 622, in prepare
_user = await _user
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/auth/identity.py", line 240, in _get_user
_cookie_user = self.get_user_cookie(handler)
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/auth/identity.py", line 398, in get_user_cookie
_user_cookie = handler.get_secure_cookie(
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py", line 836, in get_signed_cookie
self.require_setting("cookie_secret", "secure cookies")
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py", line 1669, in require_setting
raise Exception(
Exception: You must define the 'cookie_secret' setting in your application to use secure cookies
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py", line 1294, in send_error
self.write_error(status_code, **kwargs)
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py", line 719, in write_error
html = self.render_template("%s.html" % status_code, **ns)
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py", line 645, in render_template
ns.update(self.template_namespace)
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py", line 655, in template_namespace
logged_in=self.logged_in,
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py", line 197, in logged_in
user = self.current_user
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py", line 1420, in current_user
self._current_user = self.get_current_user()
File "/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py", line 176, in get_current_user
raise RuntimeError(msg)
RuntimeError: Calling `GlobalProxyHandler.get_current_user()` directly is deprecated in jupyter-server 2.0. Use `self.current_user` instead (works in all versions).
```
```
jupyter_server 2.10.0 pyhd8ed1ab_0 conda-forge
jupyter_server_terminals 0.4.4 pyhd8ed1ab_1 conda-forge
jupyter-server-proxy 4.1.0 pyhd8ed1ab_0 conda-forge
tornado 6.3.3 py310h2aa6e3c_1 conda-forge
```
</issue>
<code>
[start of distributed/http/proxy.py]
1 from __future__ import annotations
2
3 import logging
4
5 from tornado import web
6
7 logger = logging.getLogger(__name__)
8
9 try:
10 from jupyter_server_proxy.handlers import ProxyHandler
11
12 class GlobalProxyHandler(ProxyHandler):
13 """
14 A tornado request handler that proxies HTTP and websockets
15 from a port to any valid endpoint'.
16 """
17
18 def initialize(self, dask_server=None, extra=None):
19 self.scheduler = dask_server
20 self.extra = extra or {}
21
22 async def http_get(self, port, host, proxied_path):
23 # route here first
24 # incoming URI /proxy/{port}/{host}/{proxied_path}
25
26 self.host = host
27
28 # rewrite uri for jupyter-server-proxy handling
29 uri = f"/proxy/{port}/{proxied_path}"
30 self.request.uri = uri
31
32 # slash is removed during regex in handler
33 proxied_path = "/%s" % proxied_path
34
35 worker = f"{self.host}:{port}"
36 if not check_worker_dashboard_exits(self.scheduler, worker):
37 msg = "Worker <%s> does not exist" % worker
38 self.set_status(400)
39 self.finish(msg)
40 return
41 return await self.proxy(port, proxied_path)
42
43 async def open(self, port, host, proxied_path):
44 # finally, proxy to other address/port
45 return await self.proxy_open(host, port, proxied_path)
46
47 def post(self, port, proxied_path):
48 return self.proxy(port, proxied_path)
49
50 def put(self, port, proxied_path):
51 return self.proxy(port, proxied_path)
52
53 def delete(self, port, proxied_path):
54 return self.proxy(port, proxied_path)
55
56 def head(self, port, proxied_path):
57 return self.proxy(port, proxied_path)
58
59 def patch(self, port, proxied_path):
60 return self.proxy(port, proxied_path)
61
62 def options(self, port, proxied_path):
63 return self.proxy(port, proxied_path)
64
65 def proxy(self, port, proxied_path):
66 # router here second
67 # returns ProxyHandler coroutine
68 return super().proxy(self.host, port, proxied_path)
69
70 except ImportError:
71 logger.info(
72 "To route to workers diagnostics web server "
73 "please install jupyter-server-proxy: "
74 "python -m pip install jupyter-server-proxy"
75 )
76
77 class GlobalProxyHandler(web.RequestHandler): # type: ignore
78 """Minimal Proxy handler when jupyter-server-proxy is not installed"""
79
80 def initialize(self, dask_server=None, extra=None):
81 self.server = dask_server
82 self.extra = extra or {}
83
84 def get(self, port, host, proxied_path):
85 worker_url = f"{host}:{port}/{proxied_path}"
86 msg = """
87 <p> Try navigating to <a href=http://{}>{}</a> for your worker dashboard </p>
88
89 <p>
90 Dask tried to proxy you to that page through your
91 Scheduler's dashboard connection, but you don't have
92 jupyter-server-proxy installed. You may want to install it
93 with either conda or pip, and then restart your scheduler.
94 </p>
95
96 <p><pre> conda install jupyter-server-proxy -c conda-forge </pre></p>
97 <p><pre> python -m pip install jupyter-server-proxy</pre></p>
98
99 <p>
100 The link above should work though if your workers are on a
101 sufficiently open network. This is common on single machines,
102 but less common in production clusters. Your IT administrators
103 will know more
104 </p>
105 """.format(
106 worker_url,
107 worker_url,
108 )
109 self.write(msg)
110
111
112 def check_worker_dashboard_exits(scheduler, worker):
113 """Check addr:port exists as a worker in scheduler list
114
115 Parameters
116 ----------
117 worker : str
118 addr:port
119
120 Returns
121 -------
122 bool
123 """
124 addr, port = worker.split(":")
125 workers = list(scheduler.workers.values())
126 for w in workers:
127 bokeh_port = w.services.get("dashboard", "")
128 if addr == w.host and port == str(bokeh_port):
129 return True
130 return False
131
132
133 routes: list[tuple] = [(r"proxy/(\d+)/(.*?)/(.*)", GlobalProxyHandler, {})]
134
[end of distributed/http/proxy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/http/proxy.py b/distributed/http/proxy.py
--- a/distributed/http/proxy.py
+++ b/distributed/http/proxy.py
@@ -19,6 +19,15 @@
self.scheduler = dask_server
self.extra = extra or {}
+ # `get_current_user` and `prepare` method impls reference
+ # issue in tornado & jupyter server compat here
+ # https://github.com/jupyter-server/jupyter_server/issues/1012
+ def get_current_user(self):
+ return "dask"
+
+ async def prepare(self):
+ web.authenticated(lambda rq: None)(self)
+
async def http_get(self, port, host, proxied_path):
# route here first
# incoming URI /proxy/{port}/{host}/{proxied_path}
@@ -29,6 +38,9 @@
uri = f"/proxy/{port}/{proxied_path}"
self.request.uri = uri
+ if self.host not in self.host_allowlist:
+ self.host_allowlist.append(self.host)
+
# slash is removed during regex in handler
proxied_path = "/%s" % proxied_path
@@ -41,6 +53,8 @@
return await self.proxy(port, proxied_path)
async def open(self, port, host, proxied_path):
+ if host not in self.host_allowlist:
+ self.host_allowlist.append(host)
# finally, proxy to other address/port
return await self.proxy_open(host, port, proxied_path)
| {"golden_diff": "diff --git a/distributed/http/proxy.py b/distributed/http/proxy.py\n--- a/distributed/http/proxy.py\n+++ b/distributed/http/proxy.py\n@@ -19,6 +19,15 @@\n self.scheduler = dask_server\n self.extra = extra or {}\n \n+ # `get_current_user` and `prepare` method impls reference\n+ # issue in tornado & jupyter server compat here\n+ # https://github.com/jupyter-server/jupyter_server/issues/1012\n+ def get_current_user(self):\n+ return \"dask\"\n+\n+ async def prepare(self):\n+ web.authenticated(lambda rq: None)(self)\n+\n async def http_get(self, port, host, proxied_path):\n # route here first\n # incoming URI /proxy/{port}/{host}/{proxied_path}\n@@ -29,6 +38,9 @@\n uri = f\"/proxy/{port}/{proxied_path}\"\n self.request.uri = uri\n \n+ if self.host not in self.host_allowlist:\n+ self.host_allowlist.append(self.host)\n+\n # slash is removed during regex in handler\n proxied_path = \"/%s\" % proxied_path\n \n@@ -41,6 +53,8 @@\n return await self.proxy(port, proxied_path)\n \n async def open(self, port, host, proxied_path):\n+ if host not in self.host_allowlist:\n+ self.host_allowlist.append(host)\n # finally, proxy to other address/port\n return await self.proxy_open(host, port, proxied_path)\n", "issue": "Proxy to worker dashboard not working\n\r\n\r\n```\r\ndask scheduler\r\ndask worker <scheduler addr> --dashboard\r\n```\r\n\r\nThen navigate to `http://127.0.0.1:8787/info/main/workers.html` and click on dashboard. This should proxy to the worker dashboard but instead it triggers the below exception (with a 500 error code, of course)\r\n\r\n```python-traceback\r\n/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py:607: UserWarning: The Tornado web application does not have an 'identity_provider' defined in its settings. In future releases of jupyter_server, this will be a required key for all subclasses of `JupyterHandler`. 
For an example, see the jupyter_server source code for how to add an identity provider to the tornado settings: https://github.com/jupyter-server/jupyter_server/blob/v2.0.0/jupyter_server/serverapp.py#L242\r\n if type(self.identity_provider) is IdentityProvider and mod_obj.__name__ != __name__:\r\n2023-11-10 14:28:01,113 - tornado.application - ERROR - Uncaught exception GET /proxy/50121/192.168.2.53/status (127.0.0.1)\r\nHTTPServerRequest(protocol='http', host='127.0.0.1:8787', method='GET', uri='/proxy/50121/192.168.2.53/status', version='HTTP/1.1', remote_ip='127.0.0.1')\r\nTraceback (most recent call last):\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py\", line 1765, in _execute\r\n result = await result # type: ignore\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py\", line 622, in prepare\r\n _user = await _user\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/auth/identity.py\", line 240, in _get_user\r\n _cookie_user = self.get_user_cookie(handler)\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/auth/identity.py\", line 398, in get_user_cookie\r\n _user_cookie = handler.get_secure_cookie(\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py\", line 836, in get_signed_cookie\r\n self.require_setting(\"cookie_secret\", \"secure cookies\")\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py\", line 1669, in require_setting\r\n raise Exception(\r\nException: You must define the 'cookie_secret' setting in your application to use secure cookies\r\n2023-11-10 14:28:01,121 - tornado.application - ERROR - Uncaught exception in write_error\r\nTraceback (most recent call last):\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py\", line 1765, in _execute\r\n result = await result # type: ignore\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py\", line 622, in prepare\r\n _user = await _user\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/auth/identity.py\", line 240, in _get_user\r\n _cookie_user = self.get_user_cookie(handler)\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/auth/identity.py\", line 398, in get_user_cookie\r\n _user_cookie = handler.get_secure_cookie(\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py\", line 836, in get_signed_cookie\r\n self.require_setting(\"cookie_secret\", \"secure cookies\")\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py\", line 1669, in require_setting\r\n raise Exception(\r\nException: You must define the 'cookie_secret' setting in your application to use secure cookies\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py\", line 1294, in send_error\r\n self.write_error(status_code, **kwargs)\r\n File 
\"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py\", line 719, in write_error\r\n html = self.render_template(\"%s.html\" % status_code, **ns)\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py\", line 645, in render_template\r\n ns.update(self.template_namespace)\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py\", line 655, in template_namespace\r\n logged_in=self.logged_in,\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py\", line 197, in logged_in\r\n user = self.current_user\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/tornado/web.py\", line 1420, in current_user\r\n self._current_user = self.get_current_user()\r\n File \"/Users/fjetter/miniforge3/envs/coiled-benchmarks-310/lib/python3.10/site-packages/jupyter_server/base/handlers.py\", line 176, in get_current_user\r\n raise RuntimeError(msg)\r\nRuntimeError: Calling `GlobalProxyHandler.get_current_user()` directly is deprecated in jupyter-server 2.0. Use `self.current_user` instead (works in all versions).\r\n```\r\n\r\n\r\n```\r\njupyter_server 2.10.0 pyhd8ed1ab_0 conda-forge\r\njupyter_server_terminals 0.4.4 pyhd8ed1ab_1 conda-forge\r\njupyter-server-proxy 4.1.0 pyhd8ed1ab_0 conda-forge\r\ntornado 6.3.3 py310h2aa6e3c_1 conda-forge\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\n\nfrom tornado import web\n\nlogger = logging.getLogger(__name__)\n\ntry:\n from jupyter_server_proxy.handlers import ProxyHandler\n\n class GlobalProxyHandler(ProxyHandler):\n \"\"\"\n A tornado request handler that proxies HTTP and websockets\n from a port to any valid endpoint'.\n \"\"\"\n\n def initialize(self, dask_server=None, extra=None):\n self.scheduler = dask_server\n self.extra = extra or {}\n\n async def http_get(self, port, host, proxied_path):\n # route here first\n # incoming URI /proxy/{port}/{host}/{proxied_path}\n\n self.host = host\n\n # rewrite uri for jupyter-server-proxy handling\n uri = f\"/proxy/{port}/{proxied_path}\"\n self.request.uri = uri\n\n # slash is removed during regex in handler\n proxied_path = \"/%s\" % proxied_path\n\n worker = f\"{self.host}:{port}\"\n if not check_worker_dashboard_exits(self.scheduler, worker):\n msg = \"Worker <%s> does not exist\" % worker\n self.set_status(400)\n self.finish(msg)\n return\n return await self.proxy(port, proxied_path)\n\n async def open(self, port, host, proxied_path):\n # finally, proxy to other address/port\n return await self.proxy_open(host, port, proxied_path)\n\n def post(self, port, proxied_path):\n return self.proxy(port, proxied_path)\n\n def put(self, port, proxied_path):\n return self.proxy(port, proxied_path)\n\n def delete(self, port, proxied_path):\n return self.proxy(port, proxied_path)\n\n def head(self, port, proxied_path):\n return self.proxy(port, proxied_path)\n\n def patch(self, port, proxied_path):\n return self.proxy(port, proxied_path)\n\n def options(self, port, proxied_path):\n return self.proxy(port, proxied_path)\n\n def proxy(self, port, proxied_path):\n # router here second\n # returns ProxyHandler coroutine\n return super().proxy(self.host, port, proxied_path)\n\nexcept ImportError:\n logger.info(\n \"To route to workers diagnostics web server \"\n \"please install 
jupyter-server-proxy: \"\n \"python -m pip install jupyter-server-proxy\"\n )\n\n class GlobalProxyHandler(web.RequestHandler): # type: ignore\n \"\"\"Minimal Proxy handler when jupyter-server-proxy is not installed\"\"\"\n\n def initialize(self, dask_server=None, extra=None):\n self.server = dask_server\n self.extra = extra or {}\n\n def get(self, port, host, proxied_path):\n worker_url = f\"{host}:{port}/{proxied_path}\"\n msg = \"\"\"\n <p> Try navigating to <a href=http://{}>{}</a> for your worker dashboard </p>\n\n <p>\n Dask tried to proxy you to that page through your\n Scheduler's dashboard connection, but you don't have\n jupyter-server-proxy installed. You may want to install it\n with either conda or pip, and then restart your scheduler.\n </p>\n\n <p><pre> conda install jupyter-server-proxy -c conda-forge </pre></p>\n <p><pre> python -m pip install jupyter-server-proxy</pre></p>\n\n <p>\n The link above should work though if your workers are on a\n sufficiently open network. This is common on single machines,\n but less common in production clusters. Your IT administrators\n will know more\n </p>\n \"\"\".format(\n worker_url,\n worker_url,\n )\n self.write(msg)\n\n\ndef check_worker_dashboard_exits(scheduler, worker):\n \"\"\"Check addr:port exists as a worker in scheduler list\n\n Parameters\n ----------\n worker : str\n addr:port\n\n Returns\n -------\n bool\n \"\"\"\n addr, port = worker.split(\":\")\n workers = list(scheduler.workers.values())\n for w in workers:\n bokeh_port = w.services.get(\"dashboard\", \"\")\n if addr == w.host and port == str(bokeh_port):\n return True\n return False\n\n\nroutes: list[tuple] = [(r\"proxy/(\\d+)/(.*?)/(.*)\", GlobalProxyHandler, {})]\n", "path": "distributed/http/proxy.py"}]} | 3,520 | 348 |
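The traceback in the record above comes from jupyter_server 2.x insisting on an identity provider; the golden diff answers that by pinning a user, pre-running the authentication check, and allow-listing the worker host before proxying. A stripped-down sketch of those pieces (it assumes `jupyter-server-proxy` is installed; `CompatProxyHandler` is an illustrative name, and the real `GlobalProxyHandler` additionally validates the worker address and rewrites the request URI):

```python
from jupyter_server_proxy.handlers import ProxyHandler
from tornado import web


class CompatProxyHandler(ProxyHandler):
    """Sketch of the jupyter_server >= 2.0 compatibility pieces from the diff above."""

    def get_current_user(self):
        # jupyter_server 2.x resolves users via self.current_user; a fixed identity
        # sidesteps the cookie_secret / identity_provider setup seen in the traceback.
        return "dask"

    async def prepare(self):
        # Run tornado's @web.authenticated check explicitly, without a login URL.
        web.authenticated(lambda request_handler: None)(self)

    async def http_get(self, port, host, proxied_path):
        # jupyter-server-proxy only forwards to allow-listed hosts, so register the
        # worker host taken from the URL before handing off to the base proxy().
        if host not in self.host_allowlist:
            self.host_allowlist.append(host)
        return await super().proxy(host, port, "/" + proxied_path)
```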
gh_patches_debug_32533 | rasdani/github-patches | git_diff | pypa__setuptools-2573 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
upload_docs raises AttributeError with Sphinx 1.6
In Sphinx 1.6, `builder_target_dir` was replaced with `builder_target_dirs`, see sphinx-doc/sphinx@2afa0b6627f7e5afb188d5a60c8c4767f6250774 and sphinx-doc/sphinx#3476.
This causes an AttributeError in setuptools:
```pytb
Traceback (most recent call last):
File "setup.py", line 67, in <module>
setup(**setup_args)
File "/usr/lib/python3.5/distutils/core.py", line 148, in setup
dist.run_commands()
File "/usr/lib/python3.5/distutils/dist.py", line 955, in run_commands
self.run_command(cmd)
File "/usr/lib/python3.5/distutils/dist.py", line 973, in run_command
cmd_obj.ensure_finalized()
File "/usr/lib/python3.5/distutils/cmd.py", line 107, in ensure_finalized
self.finalize_options()
File "/home/dmitry/.local/lib/python3.5/site-packages/setuptools/command/upload_docs.py", line 65, in finalize_options
self.target_dir = build_sphinx.builder_target_dir
File "/usr/lib/python3.5/distutils/cmd.py", line 103, in __getattr__
raise AttributeError(attr)
AttributeError: builder_target_dir
```
</issue>
<code>
[start of setuptools/command/upload_docs.py]
1 # -*- coding: utf-8 -*-
2 """upload_docs
3
4 Implements a Distutils 'upload_docs' subcommand (upload documentation to
5 PyPI's pythonhosted.org).
6 """
7
8 from base64 import standard_b64encode
9 from distutils import log
10 from distutils.errors import DistutilsOptionError
11 import os
12 import socket
13 import zipfile
14 import tempfile
15 import shutil
16 import itertools
17 import functools
18 import http.client
19 import urllib.parse
20
21 from pkg_resources import iter_entry_points
22 from .upload import upload
23
24
25 def _encode(s):
26 return s.encode('utf-8', 'surrogateescape')
27
28
29 class upload_docs(upload):
30 # override the default repository as upload_docs isn't
31 # supported by Warehouse (and won't be).
32 DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'
33
34 description = 'Upload documentation to PyPI'
35
36 user_options = [
37 ('repository=', 'r',
38 "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
39 ('show-response', None,
40 'display full response text from server'),
41 ('upload-dir=', None, 'directory to upload'),
42 ]
43 boolean_options = upload.boolean_options
44
45 def has_sphinx(self):
46 if self.upload_dir is None:
47 for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
48 return True
49
50 sub_commands = [('build_sphinx', has_sphinx)]
51
52 def initialize_options(self):
53 upload.initialize_options(self)
54 self.upload_dir = None
55 self.target_dir = None
56
57 def finalize_options(self):
58 upload.finalize_options(self)
59 if self.upload_dir is None:
60 if self.has_sphinx():
61 build_sphinx = self.get_finalized_command('build_sphinx')
62 self.target_dir = build_sphinx.builder_target_dir
63 else:
64 build = self.get_finalized_command('build')
65 self.target_dir = os.path.join(build.build_base, 'docs')
66 else:
67 self.ensure_dirname('upload_dir')
68 self.target_dir = self.upload_dir
69 if 'pypi.python.org' in self.repository:
70 log.warn("Upload_docs command is deprecated. Use RTD instead.")
71 self.announce('Using upload directory %s' % self.target_dir)
72
73 def create_zipfile(self, filename):
74 zip_file = zipfile.ZipFile(filename, "w")
75 try:
76 self.mkpath(self.target_dir) # just in case
77 for root, dirs, files in os.walk(self.target_dir):
78 if root == self.target_dir and not files:
79 tmpl = "no files found in upload directory '%s'"
80 raise DistutilsOptionError(tmpl % self.target_dir)
81 for name in files:
82 full = os.path.join(root, name)
83 relative = root[len(self.target_dir):].lstrip(os.path.sep)
84 dest = os.path.join(relative, name)
85 zip_file.write(full, dest)
86 finally:
87 zip_file.close()
88
89 def run(self):
90 # Run sub commands
91 for cmd_name in self.get_sub_commands():
92 self.run_command(cmd_name)
93
94 tmp_dir = tempfile.mkdtemp()
95 name = self.distribution.metadata.get_name()
96 zip_file = os.path.join(tmp_dir, "%s.zip" % name)
97 try:
98 self.create_zipfile(zip_file)
99 self.upload_file(zip_file)
100 finally:
101 shutil.rmtree(tmp_dir)
102
103 @staticmethod
104 def _build_part(item, sep_boundary):
105 key, values = item
106 title = '\nContent-Disposition: form-data; name="%s"' % key
107 # handle multiple entries for the same name
108 if not isinstance(values, list):
109 values = [values]
110 for value in values:
111 if isinstance(value, tuple):
112 title += '; filename="%s"' % value[0]
113 value = value[1]
114 else:
115 value = _encode(value)
116 yield sep_boundary
117 yield _encode(title)
118 yield b"\n\n"
119 yield value
120 if value and value[-1:] == b'\r':
121 yield b'\n' # write an extra newline (lurve Macs)
122
123 @classmethod
124 def _build_multipart(cls, data):
125 """
126 Build up the MIME payload for the POST data
127 """
128 boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
129 sep_boundary = b'\n--' + boundary.encode('ascii')
130 end_boundary = sep_boundary + b'--'
131 end_items = end_boundary, b"\n",
132 builder = functools.partial(
133 cls._build_part,
134 sep_boundary=sep_boundary,
135 )
136 part_groups = map(builder, data.items())
137 parts = itertools.chain.from_iterable(part_groups)
138 body_items = itertools.chain(parts, end_items)
139 content_type = 'multipart/form-data; boundary=%s' % boundary
140 return b''.join(body_items), content_type
141
142 def upload_file(self, filename):
143 with open(filename, 'rb') as f:
144 content = f.read()
145 meta = self.distribution.metadata
146 data = {
147 ':action': 'doc_upload',
148 'name': meta.get_name(),
149 'content': (os.path.basename(filename), content),
150 }
151 # set up the authentication
152 credentials = _encode(self.username + ':' + self.password)
153 credentials = standard_b64encode(credentials).decode('ascii')
154 auth = "Basic " + credentials
155
156 body, ct = self._build_multipart(data)
157
158 msg = "Submitting documentation to %s" % (self.repository)
159 self.announce(msg, log.INFO)
160
161 # build the Request
162 # We can't use urllib2 since we need to send the Basic
163 # auth right with the first request
164 schema, netloc, url, params, query, fragments = \
165 urllib.parse.urlparse(self.repository)
166 assert not params and not query and not fragments
167 if schema == 'http':
168 conn = http.client.HTTPConnection(netloc)
169 elif schema == 'https':
170 conn = http.client.HTTPSConnection(netloc)
171 else:
172 raise AssertionError("unsupported schema " + schema)
173
174 data = ''
175 try:
176 conn.connect()
177 conn.putrequest("POST", url)
178 content_type = ct
179 conn.putheader('Content-type', content_type)
180 conn.putheader('Content-length', str(len(body)))
181 conn.putheader('Authorization', auth)
182 conn.endheaders()
183 conn.send(body)
184 except socket.error as e:
185 self.announce(str(e), log.ERROR)
186 return
187
188 r = conn.getresponse()
189 if r.status == 200:
190 msg = 'Server response (%s): %s' % (r.status, r.reason)
191 self.announce(msg, log.INFO)
192 elif r.status == 301:
193 location = r.getheader('Location')
194 if location is None:
195 location = 'https://pythonhosted.org/%s/' % meta.get_name()
196 msg = 'Upload successful. Visit %s' % location
197 self.announce(msg, log.INFO)
198 else:
199 msg = 'Upload failed (%s): %s' % (r.status, r.reason)
200 self.announce(msg, log.ERROR)
201 if self.show_response:
202 print('-' * 75, r.read(), '-' * 75)
203
[end of setuptools/command/upload_docs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setuptools/command/upload_docs.py b/setuptools/command/upload_docs.py
--- a/setuptools/command/upload_docs.py
+++ b/setuptools/command/upload_docs.py
@@ -2,7 +2,7 @@
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
-PyPI's pythonhosted.org).
+sites other than PyPi such as devpi).
"""
from base64 import standard_b64encode
@@ -31,7 +31,7 @@
# supported by Warehouse (and won't be).
DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'
- description = 'Upload documentation to PyPI'
+ description = 'Upload documentation to sites other than PyPi such as devpi'
user_options = [
('repository=', 'r',
@@ -59,7 +59,7 @@
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command('build_sphinx')
- self.target_dir = build_sphinx.builder_target_dir
+ self.target_dir = dict(build_sphinx.builder_target_dirs)['html']
else:
build = self.get_finalized_command('build')
self.target_dir = os.path.join(build.build_base, 'docs')
@@ -67,7 +67,7 @@
self.ensure_dirname('upload_dir')
self.target_dir = self.upload_dir
if 'pypi.python.org' in self.repository:
- log.warn("Upload_docs command is deprecated. Use RTD instead.")
+ log.warn("Upload_docs command is deprecated for PyPi. Use RTD instead.")
self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
| {"golden_diff": "diff --git a/setuptools/command/upload_docs.py b/setuptools/command/upload_docs.py\n--- a/setuptools/command/upload_docs.py\n+++ b/setuptools/command/upload_docs.py\n@@ -2,7 +2,7 @@\n \"\"\"upload_docs\n \n Implements a Distutils 'upload_docs' subcommand (upload documentation to\n-PyPI's pythonhosted.org).\n+sites other than PyPi such as devpi).\n \"\"\"\n \n from base64 import standard_b64encode\n@@ -31,7 +31,7 @@\n # supported by Warehouse (and won't be).\n DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'\n \n- description = 'Upload documentation to PyPI'\n+ description = 'Upload documentation to sites other than PyPi such as devpi'\n \n user_options = [\n ('repository=', 'r',\n@@ -59,7 +59,7 @@\n if self.upload_dir is None:\n if self.has_sphinx():\n build_sphinx = self.get_finalized_command('build_sphinx')\n- self.target_dir = build_sphinx.builder_target_dir\n+ self.target_dir = dict(build_sphinx.builder_target_dirs)['html']\n else:\n build = self.get_finalized_command('build')\n self.target_dir = os.path.join(build.build_base, 'docs')\n@@ -67,7 +67,7 @@\n self.ensure_dirname('upload_dir')\n self.target_dir = self.upload_dir\n if 'pypi.python.org' in self.repository:\n- log.warn(\"Upload_docs command is deprecated. Use RTD instead.\")\n+ log.warn(\"Upload_docs command is deprecated for PyPi. Use RTD instead.\")\n self.announce('Using upload directory %s' % self.target_dir)\n \n def create_zipfile(self, filename):\n", "issue": "upload_docs raises AttributeError with Sphinx 1.6\nIn Sphinx 1.6, `builder_target_dir` was replaced with `builder_target_dirs`, see sphinx-doc/sphinx@2afa0b6627f7e5afb188d5a60c8c4767f6250774 and sphinx-doc/sphinx#3476.\r\n\r\nThis causes an AttributeError in setuptools:\r\n\r\n```pytb\r\nTraceback (most recent call last):\r\n File \"setup.py\", line 67, in <module>\r\n setup(**setup_args)\r\n File \"/usr/lib/python3.5/distutils/core.py\", line 148, in setup\r\n dist.run_commands()\r\n File \"/usr/lib/python3.5/distutils/dist.py\", line 955, in run_commands\r\n self.run_command(cmd)\r\n File \"/usr/lib/python3.5/distutils/dist.py\", line 973, in run_command\r\n cmd_obj.ensure_finalized()\r\n File \"/usr/lib/python3.5/distutils/cmd.py\", line 107, in ensure_finalized\r\n self.finalize_options()\r\n File \"/home/dmitry/.local/lib/python3.5/site-packages/setuptools/command/upload_docs.py\", line 65, in finalize_options\r\n self.target_dir = build_sphinx.builder_target_dir\r\n File \"/usr/lib/python3.5/distutils/cmd.py\", line 103, in __getattr__\r\n raise AttributeError(attr)\r\nAttributeError: builder_target_dir\r\n```\nupload_docs raises AttributeError with Sphinx 1.6\nIn Sphinx 1.6, `builder_target_dir` was replaced with `builder_target_dirs`, see sphinx-doc/sphinx@2afa0b6627f7e5afb188d5a60c8c4767f6250774 and sphinx-doc/sphinx#3476.\r\n\r\nThis causes an AttributeError in setuptools:\r\n\r\n```pytb\r\nTraceback (most recent call last):\r\n File \"setup.py\", line 67, in <module>\r\n setup(**setup_args)\r\n File \"/usr/lib/python3.5/distutils/core.py\", line 148, in setup\r\n dist.run_commands()\r\n File \"/usr/lib/python3.5/distutils/dist.py\", line 955, in run_commands\r\n self.run_command(cmd)\r\n File \"/usr/lib/python3.5/distutils/dist.py\", line 973, in run_command\r\n cmd_obj.ensure_finalized()\r\n File \"/usr/lib/python3.5/distutils/cmd.py\", line 107, in ensure_finalized\r\n self.finalize_options()\r\n File \"/home/dmitry/.local/lib/python3.5/site-packages/setuptools/command/upload_docs.py\", line 65, in finalize_options\r\n 
self.target_dir = build_sphinx.builder_target_dir\r\n File \"/usr/lib/python3.5/distutils/cmd.py\", line 103, in __getattr__\r\n raise AttributeError(attr)\r\nAttributeError: builder_target_dir\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"upload_docs\n\nImplements a Distutils 'upload_docs' subcommand (upload documentation to\nPyPI's pythonhosted.org).\n\"\"\"\n\nfrom base64 import standard_b64encode\nfrom distutils import log\nfrom distutils.errors import DistutilsOptionError\nimport os\nimport socket\nimport zipfile\nimport tempfile\nimport shutil\nimport itertools\nimport functools\nimport http.client\nimport urllib.parse\n\nfrom pkg_resources import iter_entry_points\nfrom .upload import upload\n\n\ndef _encode(s):\n return s.encode('utf-8', 'surrogateescape')\n\n\nclass upload_docs(upload):\n # override the default repository as upload_docs isn't\n # supported by Warehouse (and won't be).\n DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'\n\n description = 'Upload documentation to PyPI'\n\n user_options = [\n ('repository=', 'r',\n \"url of repository [default: %s]\" % upload.DEFAULT_REPOSITORY),\n ('show-response', None,\n 'display full response text from server'),\n ('upload-dir=', None, 'directory to upload'),\n ]\n boolean_options = upload.boolean_options\n\n def has_sphinx(self):\n if self.upload_dir is None:\n for ep in iter_entry_points('distutils.commands', 'build_sphinx'):\n return True\n\n sub_commands = [('build_sphinx', has_sphinx)]\n\n def initialize_options(self):\n upload.initialize_options(self)\n self.upload_dir = None\n self.target_dir = None\n\n def finalize_options(self):\n upload.finalize_options(self)\n if self.upload_dir is None:\n if self.has_sphinx():\n build_sphinx = self.get_finalized_command('build_sphinx')\n self.target_dir = build_sphinx.builder_target_dir\n else:\n build = self.get_finalized_command('build')\n self.target_dir = os.path.join(build.build_base, 'docs')\n else:\n self.ensure_dirname('upload_dir')\n self.target_dir = self.upload_dir\n if 'pypi.python.org' in self.repository:\n log.warn(\"Upload_docs command is deprecated. 
Use RTD instead.\")\n self.announce('Using upload directory %s' % self.target_dir)\n\n def create_zipfile(self, filename):\n zip_file = zipfile.ZipFile(filename, \"w\")\n try:\n self.mkpath(self.target_dir) # just in case\n for root, dirs, files in os.walk(self.target_dir):\n if root == self.target_dir and not files:\n tmpl = \"no files found in upload directory '%s'\"\n raise DistutilsOptionError(tmpl % self.target_dir)\n for name in files:\n full = os.path.join(root, name)\n relative = root[len(self.target_dir):].lstrip(os.path.sep)\n dest = os.path.join(relative, name)\n zip_file.write(full, dest)\n finally:\n zip_file.close()\n\n def run(self):\n # Run sub commands\n for cmd_name in self.get_sub_commands():\n self.run_command(cmd_name)\n\n tmp_dir = tempfile.mkdtemp()\n name = self.distribution.metadata.get_name()\n zip_file = os.path.join(tmp_dir, \"%s.zip\" % name)\n try:\n self.create_zipfile(zip_file)\n self.upload_file(zip_file)\n finally:\n shutil.rmtree(tmp_dir)\n\n @staticmethod\n def _build_part(item, sep_boundary):\n key, values = item\n title = '\\nContent-Disposition: form-data; name=\"%s\"' % key\n # handle multiple entries for the same name\n if not isinstance(values, list):\n values = [values]\n for value in values:\n if isinstance(value, tuple):\n title += '; filename=\"%s\"' % value[0]\n value = value[1]\n else:\n value = _encode(value)\n yield sep_boundary\n yield _encode(title)\n yield b\"\\n\\n\"\n yield value\n if value and value[-1:] == b'\\r':\n yield b'\\n' # write an extra newline (lurve Macs)\n\n @classmethod\n def _build_multipart(cls, data):\n \"\"\"\n Build up the MIME payload for the POST data\n \"\"\"\n boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'\n sep_boundary = b'\\n--' + boundary.encode('ascii')\n end_boundary = sep_boundary + b'--'\n end_items = end_boundary, b\"\\n\",\n builder = functools.partial(\n cls._build_part,\n sep_boundary=sep_boundary,\n )\n part_groups = map(builder, data.items())\n parts = itertools.chain.from_iterable(part_groups)\n body_items = itertools.chain(parts, end_items)\n content_type = 'multipart/form-data; boundary=%s' % boundary\n return b''.join(body_items), content_type\n\n def upload_file(self, filename):\n with open(filename, 'rb') as f:\n content = f.read()\n meta = self.distribution.metadata\n data = {\n ':action': 'doc_upload',\n 'name': meta.get_name(),\n 'content': (os.path.basename(filename), content),\n }\n # set up the authentication\n credentials = _encode(self.username + ':' + self.password)\n credentials = standard_b64encode(credentials).decode('ascii')\n auth = \"Basic \" + credentials\n\n body, ct = self._build_multipart(data)\n\n msg = \"Submitting documentation to %s\" % (self.repository)\n self.announce(msg, log.INFO)\n\n # build the Request\n # We can't use urllib2 since we need to send the Basic\n # auth right with the first request\n schema, netloc, url, params, query, fragments = \\\n urllib.parse.urlparse(self.repository)\n assert not params and not query and not fragments\n if schema == 'http':\n conn = http.client.HTTPConnection(netloc)\n elif schema == 'https':\n conn = http.client.HTTPSConnection(netloc)\n else:\n raise AssertionError(\"unsupported schema \" + schema)\n\n data = ''\n try:\n conn.connect()\n conn.putrequest(\"POST\", url)\n content_type = ct\n conn.putheader('Content-type', content_type)\n conn.putheader('Content-length', str(len(body)))\n conn.putheader('Authorization', auth)\n conn.endheaders()\n conn.send(body)\n except socket.error as e:\n 
self.announce(str(e), log.ERROR)\n return\n\n r = conn.getresponse()\n if r.status == 200:\n msg = 'Server response (%s): %s' % (r.status, r.reason)\n self.announce(msg, log.INFO)\n elif r.status == 301:\n location = r.getheader('Location')\n if location is None:\n location = 'https://pythonhosted.org/%s/' % meta.get_name()\n msg = 'Upload successful. Visit %s' % location\n self.announce(msg, log.INFO)\n else:\n msg = 'Upload failed (%s): %s' % (r.status, r.reason)\n self.announce(msg, log.ERROR)\n if self.show_response:\n print('-' * 75, r.read(), '-' * 75)\n", "path": "setuptools/command/upload_docs.py"}]} | 3,278 | 382 |
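The API change behind this record is small but easy to trip over: since Sphinx 1.6, the `build_sphinx` command exposes `builder_target_dirs`, a list of `(builder, target_dir)` pairs, instead of the scalar `builder_target_dir`. A hedged sketch of a version-tolerant lookup (the `hasattr` fallback is an extra assumption for older Sphinx releases; the golden diff itself simply switches to the new attribute):

```python
def sphinx_html_target_dir(build_sphinx):
    """Return the HTML output directory from a finalized build_sphinx command.

    Sphinx >= 1.6 exposes builder_target_dirs as (builder, target_dir) pairs;
    earlier releases only had the scalar builder_target_dir.
    """
    if hasattr(build_sphinx, "builder_target_dirs"):
        return dict(build_sphinx.builder_target_dirs)["html"]
    return build_sphinx.builder_target_dir
```

The actual patch also softens the PyPI-specific wording of the command description and deprecation warning; the fallback branch above is only useful if pre-1.6 Sphinx still needs to be supported.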
gh_patches_debug_41428 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-13415 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
www.watchindianporn.net parser is broken
- [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.06.12**
### Before submitting an *issue* make sure you have:
- [x] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
### What is the purpose of your *issue*?
- [x] Bug report (encountered problems with youtube-dl)
- [ ] Site support request (request for adding support for a new site)
- [ ] Feature request (request for a new functionality)
- [ ] Question
- [ ] Other
---
```
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: ['--verbose', '-iw', '--write-description', '-R', '10', '-o', '%(title)s-
%(id)s.%(ext)s', '-a', 'youtube-dl.txt', '--external-downloader', 'curl', '--external-downloader-arg
s', '-C - -L']
[debug] Batch file urls: ['http://www.watchindianporn.net/video/up-bhoji-lifting-her-saree-and-expos
ing-her-dirty-gaand-qsnHOGU7Ey1.html']
[debug] Encodings: locale cp1252, fs mbcs, out cp437, pref cp1252
[debug] youtube-dl version 2017.06.12
[debug] Python version 3.4.4 - Windows-7-6.1.7601-SP1
[debug] exe versions: none
[debug] Proxy map: {}
[WatchIndianPorn] up-bhoji-lifting-her-saree-and-exposing-her-dirty-gaand: Downloading webpage
ERROR: Unable to extract url; please report this issue on https://yt-dl.org/bug . Make sure you are
using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verb
ose flag and include its complete output.
Traceback (most recent call last):
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpkyaecyzu\build\youtube_dl\Youtu
beDL.py", line 762, in extract_info
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpkyaecyzu\build\youtube_dl\extra
ctor\common.py", line 433, in extract
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpkyaecyzu\build\youtube_dl\extra
ctor\watchindianporn.py", line 44, in _real_extract
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpkyaecyzu\build\youtube_dl\extra
ctor\common.py", line 791, in _html_search_regex
File "C:\Users\dst\AppData\Roaming\Build archive\youtube-dl\rg3\tmpkyaecyzu\build\youtube_dl\extra
ctor\common.py", line 782, in _search_regex
youtube_dl.utils.RegexNotFoundError: Unable to extract url; please report this issue on https://yt-d
l.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to c
all youtube-dl with the --verbose flag and include its complete output.
```
---
www.watchindianporn.net parser is broken. console log submitted above.
</issue>
<code>
[start of youtube_dl/extractor/watchindianporn.py]
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import re
5
6 from .common import InfoExtractor
7 from ..utils import (
8 unified_strdate,
9 parse_duration,
10 int_or_none,
11 )
12
13
14 class WatchIndianPornIE(InfoExtractor):
15 IE_DESC = 'Watch Indian Porn'
16 _VALID_URL = r'https?://(?:www\.)?watchindianporn\.net/(?:[^/]+/)*video/(?P<display_id>[^/]+)-(?P<id>[a-zA-Z0-9]+)\.html'
17 _TEST = {
18 'url': 'http://www.watchindianporn.net/video/hot-milf-from-kerala-shows-off-her-gorgeous-large-breasts-on-camera-RZa2avywNPa.html',
19 'md5': '249589a164dde236ec65832bfce17440',
20 'info_dict': {
21 'id': 'RZa2avywNPa',
22 'display_id': 'hot-milf-from-kerala-shows-off-her-gorgeous-large-breasts-on-camera',
23 'ext': 'mp4',
24 'title': 'Hot milf from kerala shows off her gorgeous large breasts on camera',
25 'thumbnail': r're:^https?://.*\.jpg$',
26 'uploader': 'LoveJay',
27 'upload_date': '20160428',
28 'duration': 226,
29 'view_count': int,
30 'comment_count': int,
31 'categories': list,
32 'age_limit': 18,
33 }
34 }
35
36 def _real_extract(self, url):
37 mobj = re.match(self._VALID_URL, url)
38 video_id = mobj.group('id')
39 display_id = mobj.group('display_id')
40
41 webpage = self._download_webpage(url, display_id)
42
43 video_url = self._html_search_regex(
44 r"url: escape\('([^']+)'\)", webpage, 'url')
45
46 title = self._html_search_regex(
47 r'<h2 class="he2"><span>(.*?)</span>',
48 webpage, 'title')
49 thumbnail = self._html_search_regex(
50 r'<span id="container"><img\s+src="([^"]+)"',
51 webpage, 'thumbnail', fatal=False)
52
53 uploader = self._html_search_regex(
54 r'class="aupa">\s*(.*?)</a>',
55 webpage, 'uploader')
56 upload_date = unified_strdate(self._html_search_regex(
57 r'Added: <strong>(.+?)</strong>', webpage, 'upload date', fatal=False))
58
59 duration = parse_duration(self._search_regex(
60 r'<td>Time:\s*</td>\s*<td align="right"><span>\s*(.+?)\s*</span>',
61 webpage, 'duration', fatal=False))
62
63 view_count = int_or_none(self._search_regex(
64 r'<td>Views:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
65 webpage, 'view count', fatal=False))
66 comment_count = int_or_none(self._search_regex(
67 r'<td>Comments:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
68 webpage, 'comment count', fatal=False))
69
70 categories = re.findall(
71 r'<a href="[^"]+/search/video/desi"><span>([^<]+)</span></a>',
72 webpage)
73
74 return {
75 'id': video_id,
76 'display_id': display_id,
77 'url': video_url,
78 'http_headers': {
79 'Referer': url,
80 },
81 'title': title,
82 'thumbnail': thumbnail,
83 'uploader': uploader,
84 'upload_date': upload_date,
85 'duration': duration,
86 'view_count': view_count,
87 'comment_count': comment_count,
88 'categories': categories,
89 'age_limit': 18,
90 }
91
[end of youtube_dl/extractor/watchindianporn.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/youtube_dl/extractor/watchindianporn.py b/youtube_dl/extractor/watchindianporn.py
--- a/youtube_dl/extractor/watchindianporn.py
+++ b/youtube_dl/extractor/watchindianporn.py
@@ -4,11 +4,7 @@
import re
from .common import InfoExtractor
-from ..utils import (
- unified_strdate,
- parse_duration,
- int_or_none,
-)
+from ..utils import parse_duration
class WatchIndianPornIE(InfoExtractor):
@@ -23,11 +19,8 @@
'ext': 'mp4',
'title': 'Hot milf from kerala shows off her gorgeous large breasts on camera',
'thumbnail': r're:^https?://.*\.jpg$',
- 'uploader': 'LoveJay',
- 'upload_date': '20160428',
'duration': 226,
'view_count': int,
- 'comment_count': int,
'categories': list,
'age_limit': 18,
}
@@ -40,51 +33,36 @@
webpage = self._download_webpage(url, display_id)
- video_url = self._html_search_regex(
- r"url: escape\('([^']+)'\)", webpage, 'url')
+ info_dict = self._parse_html5_media_entries(url, webpage, video_id)[0]
- title = self._html_search_regex(
- r'<h2 class="he2"><span>(.*?)</span>',
- webpage, 'title')
- thumbnail = self._html_search_regex(
- r'<span id="container"><img\s+src="([^"]+)"',
- webpage, 'thumbnail', fatal=False)
-
- uploader = self._html_search_regex(
- r'class="aupa">\s*(.*?)</a>',
- webpage, 'uploader')
- upload_date = unified_strdate(self._html_search_regex(
- r'Added: <strong>(.+?)</strong>', webpage, 'upload date', fatal=False))
+ title = self._html_search_regex((
+ r'<title>(.+?)\s*-\s*Indian\s+Porn</title>',
+ r'<h4>(.+?)</h4>'
+ ), webpage, 'title')
duration = parse_duration(self._search_regex(
- r'<td>Time:\s*</td>\s*<td align="right"><span>\s*(.+?)\s*</span>',
+ r'Time:\s*<strong>\s*(.+?)\s*</strong>',
webpage, 'duration', fatal=False))
- view_count = int_or_none(self._search_regex(
- r'<td>Views:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
+ view_count = int(self._search_regex(
+ r'(?s)Time:\s*<strong>.*?</strong>.*?<strong>\s*(\d+)\s*</strong>',
webpage, 'view count', fatal=False))
- comment_count = int_or_none(self._search_regex(
- r'<td>Comments:\s*</td>\s*<td align="right"><span>\s*(\d+)\s*</span>',
- webpage, 'comment count', fatal=False))
categories = re.findall(
- r'<a href="[^"]+/search/video/desi"><span>([^<]+)</span></a>',
+ r'<a[^>]+class=[\'"]categories[\'"][^>]*>\s*([^<]+)\s*</a>',
webpage)
- return {
+ info_dict.update({
'id': video_id,
'display_id': display_id,
- 'url': video_url,
'http_headers': {
'Referer': url,
},
'title': title,
- 'thumbnail': thumbnail,
- 'uploader': uploader,
- 'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
- 'comment_count': comment_count,
'categories': categories,
'age_limit': 18,
- }
+ })
+
+ return info_dict
| {"golden_diff": "diff --git a/youtube_dl/extractor/watchindianporn.py b/youtube_dl/extractor/watchindianporn.py\n--- a/youtube_dl/extractor/watchindianporn.py\n+++ b/youtube_dl/extractor/watchindianporn.py\n@@ -4,11 +4,7 @@\n import re\n \n from .common import InfoExtractor\n-from ..utils import (\n- unified_strdate,\n- parse_duration,\n- int_or_none,\n-)\n+from ..utils import parse_duration\n \n \n class WatchIndianPornIE(InfoExtractor):\n@@ -23,11 +19,8 @@\n 'ext': 'mp4',\n 'title': 'Hot milf from kerala shows off her gorgeous large breasts on camera',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n- 'uploader': 'LoveJay',\n- 'upload_date': '20160428',\n 'duration': 226,\n 'view_count': int,\n- 'comment_count': int,\n 'categories': list,\n 'age_limit': 18,\n }\n@@ -40,51 +33,36 @@\n \n webpage = self._download_webpage(url, display_id)\n \n- video_url = self._html_search_regex(\n- r\"url: escape\\('([^']+)'\\)\", webpage, 'url')\n+ info_dict = self._parse_html5_media_entries(url, webpage, video_id)[0]\n \n- title = self._html_search_regex(\n- r'<h2 class=\"he2\"><span>(.*?)</span>',\n- webpage, 'title')\n- thumbnail = self._html_search_regex(\n- r'<span id=\"container\"><img\\s+src=\"([^\"]+)\"',\n- webpage, 'thumbnail', fatal=False)\n-\n- uploader = self._html_search_regex(\n- r'class=\"aupa\">\\s*(.*?)</a>',\n- webpage, 'uploader')\n- upload_date = unified_strdate(self._html_search_regex(\n- r'Added: <strong>(.+?)</strong>', webpage, 'upload date', fatal=False))\n+ title = self._html_search_regex((\n+ r'<title>(.+?)\\s*-\\s*Indian\\s+Porn</title>',\n+ r'<h4>(.+?)</h4>'\n+ ), webpage, 'title')\n \n duration = parse_duration(self._search_regex(\n- r'<td>Time:\\s*</td>\\s*<td align=\"right\"><span>\\s*(.+?)\\s*</span>',\n+ r'Time:\\s*<strong>\\s*(.+?)\\s*</strong>',\n webpage, 'duration', fatal=False))\n \n- view_count = int_or_none(self._search_regex(\n- r'<td>Views:\\s*</td>\\s*<td align=\"right\"><span>\\s*(\\d+)\\s*</span>',\n+ view_count = int(self._search_regex(\n+ r'(?s)Time:\\s*<strong>.*?</strong>.*?<strong>\\s*(\\d+)\\s*</strong>',\n webpage, 'view count', fatal=False))\n- comment_count = int_or_none(self._search_regex(\n- r'<td>Comments:\\s*</td>\\s*<td align=\"right\"><span>\\s*(\\d+)\\s*</span>',\n- webpage, 'comment count', fatal=False))\n \n categories = re.findall(\n- r'<a href=\"[^\"]+/search/video/desi\"><span>([^<]+)</span></a>',\n+ r'<a[^>]+class=[\\'\"]categories[\\'\"][^>]*>\\s*([^<]+)\\s*</a>',\n webpage)\n \n- return {\n+ info_dict.update({\n 'id': video_id,\n 'display_id': display_id,\n- 'url': video_url,\n 'http_headers': {\n 'Referer': url,\n },\n 'title': title,\n- 'thumbnail': thumbnail,\n- 'uploader': uploader,\n- 'upload_date': upload_date,\n 'duration': duration,\n 'view_count': view_count,\n- 'comment_count': comment_count,\n 'categories': categories,\n 'age_limit': 18,\n- }\n+ })\n+\n+ return info_dict\n", "issue": "www.watchindianporn.net parser is broken\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.06.12**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n\r\n### What is the purpose of your *issue*?\r\n- [x] Bug report (encountered problems with youtube-dl)\r\n- 
[ ] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n---\r\n```\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: ['--verbose', '-iw', '--write-description', '-R', '10', '-o', '%(title)s-\r\n%(id)s.%(ext)s', '-a', 'youtube-dl.txt', '--external-downloader', 'curl', '--external-downloader-arg\r\ns', '-C - -L']\r\n[debug] Batch file urls: ['http://www.watchindianporn.net/video/up-bhoji-lifting-her-saree-and-expos\r\ning-her-dirty-gaand-qsnHOGU7Ey1.html']\r\n[debug] Encodings: locale cp1252, fs mbcs, out cp437, pref cp1252\r\n[debug] youtube-dl version 2017.06.12\r\n[debug] Python version 3.4.4 - Windows-7-6.1.7601-SP1\r\n[debug] exe versions: none\r\n[debug] Proxy map: {}\r\n[WatchIndianPorn] up-bhoji-lifting-her-saree-and-exposing-her-dirty-gaand: Downloading webpage\r\nERROR: Unable to extract url; please report this issue on https://yt-dl.org/bug . Make sure you are\r\nusing the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verb\r\nose flag and include its complete output.\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmpkyaecyzu\\build\\youtube_dl\\Youtu\r\nbeDL.py\", line 762, in extract_info\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmpkyaecyzu\\build\\youtube_dl\\extra\r\nctor\\common.py\", line 433, in extract\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmpkyaecyzu\\build\\youtube_dl\\extra\r\nctor\\watchindianporn.py\", line 44, in _real_extract\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmpkyaecyzu\\build\\youtube_dl\\extra\r\nctor\\common.py\", line 791, in _html_search_regex\r\n File \"C:\\Users\\dst\\AppData\\Roaming\\Build archive\\youtube-dl\\rg3\\tmpkyaecyzu\\build\\youtube_dl\\extra\r\nctor\\common.py\", line 782, in _search_regex\r\nyoutube_dl.utils.RegexNotFoundError: Unable to extract url; please report this issue on https://yt-d\r\nl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to c\r\nall youtube-dl with the --verbose flag and include its complete output.\r\n```\r\n---\r\nwww.watchindianporn.net parser is broken. 
console log submitted above.\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n unified_strdate,\n parse_duration,\n int_or_none,\n)\n\n\nclass WatchIndianPornIE(InfoExtractor):\n IE_DESC = 'Watch Indian Porn'\n _VALID_URL = r'https?://(?:www\\.)?watchindianporn\\.net/(?:[^/]+/)*video/(?P<display_id>[^/]+)-(?P<id>[a-zA-Z0-9]+)\\.html'\n _TEST = {\n 'url': 'http://www.watchindianporn.net/video/hot-milf-from-kerala-shows-off-her-gorgeous-large-breasts-on-camera-RZa2avywNPa.html',\n 'md5': '249589a164dde236ec65832bfce17440',\n 'info_dict': {\n 'id': 'RZa2avywNPa',\n 'display_id': 'hot-milf-from-kerala-shows-off-her-gorgeous-large-breasts-on-camera',\n 'ext': 'mp4',\n 'title': 'Hot milf from kerala shows off her gorgeous large breasts on camera',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n 'uploader': 'LoveJay',\n 'upload_date': '20160428',\n 'duration': 226,\n 'view_count': int,\n 'comment_count': int,\n 'categories': list,\n 'age_limit': 18,\n }\n }\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n video_id = mobj.group('id')\n display_id = mobj.group('display_id')\n\n webpage = self._download_webpage(url, display_id)\n\n video_url = self._html_search_regex(\n r\"url: escape\\('([^']+)'\\)\", webpage, 'url')\n\n title = self._html_search_regex(\n r'<h2 class=\"he2\"><span>(.*?)</span>',\n webpage, 'title')\n thumbnail = self._html_search_regex(\n r'<span id=\"container\"><img\\s+src=\"([^\"]+)\"',\n webpage, 'thumbnail', fatal=False)\n\n uploader = self._html_search_regex(\n r'class=\"aupa\">\\s*(.*?)</a>',\n webpage, 'uploader')\n upload_date = unified_strdate(self._html_search_regex(\n r'Added: <strong>(.+?)</strong>', webpage, 'upload date', fatal=False))\n\n duration = parse_duration(self._search_regex(\n r'<td>Time:\\s*</td>\\s*<td align=\"right\"><span>\\s*(.+?)\\s*</span>',\n webpage, 'duration', fatal=False))\n\n view_count = int_or_none(self._search_regex(\n r'<td>Views:\\s*</td>\\s*<td align=\"right\"><span>\\s*(\\d+)\\s*</span>',\n webpage, 'view count', fatal=False))\n comment_count = int_or_none(self._search_regex(\n r'<td>Comments:\\s*</td>\\s*<td align=\"right\"><span>\\s*(\\d+)\\s*</span>',\n webpage, 'comment count', fatal=False))\n\n categories = re.findall(\n r'<a href=\"[^\"]+/search/video/desi\"><span>([^<]+)</span></a>',\n webpage)\n\n return {\n 'id': video_id,\n 'display_id': display_id,\n 'url': video_url,\n 'http_headers': {\n 'Referer': url,\n },\n 'title': title,\n 'thumbnail': thumbnail,\n 'uploader': uploader,\n 'upload_date': upload_date,\n 'duration': duration,\n 'view_count': view_count,\n 'comment_count': comment_count,\n 'categories': categories,\n 'age_limit': 18,\n }\n", "path": "youtube_dl/extractor/watchindianporn.py"}]} | 2,503 | 931 |
gh_patches_debug_14145 | rasdani/github-patches | git_diff | facebookresearch__nevergrad-11 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hello-world install question
## Steps to reproduce
Install nevergrad
```
pip3 install -e git+git@github.com:facebookresearch/nevergrad@master#egg=nevergrad
Obtaining nevergrad from git+git@github.com:facebookresearch/nevergrad@master#egg=nevergrad
  Cloning git@github.com:facebookresearch/nevergrad (to revision master) to ./src/nevergrad
Warning: Permanently added the RSA host key for IP address '192.30.255.112' to the list of known hosts.
git@github.com: Permission denied (publickey).
fatal: Could not read from remote repository.
Please make sure you have the correct access rights
and the repository exists.
Command "git clone -q [email protected]:facebookresearch/nevergrad /Users/ME/Documents/workspace/temp/src/nevergrad" failed with error code 128 in None
```
OK, trying the git clone and `python3 setup.py install` method instead. That seems to work.
Run the sample program:
python3 mynevergrad.py
```
from nevergrad.optimization import optimizerlib
def square(x):
return (x - .5)**2
optimizer = optimizerlib.OnePlusOne(dimension=1, budget=100, num_workers=5)
recommendation = optimizer.optimize(square, executor=None, batch_mode=True)
```
## Observed Results
```
Traceback (most recent call last):
File "mynevergrad.py", line 6, in <module>
from nevergrad.optimization import optimizerlib
ModuleNotFoundError: No module named 'nevergrad.optimization'
```
## Expected Results
It should run the sample
## Relevant Code
```
import pkg_resources
for d in pkg_resources.working_set:
print(d)
```
DOES include `nevergrad 0.1.0`
This is very likely an install, python3, homebrew "installed in user directory", or paths issue, but given that `nevergrad 0.1.0` shows up in the list, it is odd...
</issue>
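The symptom above (`pkg_resources` lists `nevergrad 0.1.0`, yet `nevergrad.optimization` cannot be imported) usually means the distribution was installed without its subpackages; the `setup.py` below only declares `packages=['nevergrad']`. As a minimal sketch of one way to pick the subpackages up automatically (illustrative only; `find_packages` is an assumption here, not necessarily the change adopted upstream):
```
# Sketch of a setup.py that includes nevergrad's subpackages automatically.
# Assumes setuptools is available; not the exact upstream fix.
from setuptools import setup, find_packages

with open('requirements.txt') as f:
    requirements = f.read().splitlines()

setup(
    name='nevergrad',
    version='0.1.0',
    description='Gradient-free optimization toolbox',
    author='Facebook AI Research',
    # find_packages() walks the source tree and returns every package that has
    # an __init__.py, e.g. ['nevergrad', 'nevergrad.optimization', ...],
    # instead of only the top-level ['nevergrad'].
    packages=find_packages(),
    install_requires=requirements,
)
```
Listing each subpackage explicitly works just as well; the point is that any package not named (directly or via `find_packages`) simply never gets copied by `pip install` or `setup.py install`.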
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
3 #
4 # This source code is licensed under the MIT license found in the
5 # LICENSE file in the root directory of this source tree.
6
7 # from distutils.core import setup
8 from setuptools import setup
9
10
11 with open('requirements.txt') as f:
12 requirements = f.read().splitlines()
13
14
15 setup(name='nevergrad',
16 version='0.1.0',
17 description='Gradient-free optimization toolbox',
18 author='Facebook AI Research',
19 packages=['nevergrad'],
20 install_requires=requirements,)
21
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,14 +7,22 @@
# from distutils.core import setup
from setuptools import setup
-
with open('requirements.txt') as f:
requirements = f.read().splitlines()
-
-setup(name='nevergrad',
- version='0.1.0',
- description='Gradient-free optimization toolbox',
- author='Facebook AI Research',
- packages=['nevergrad'],
- install_requires=requirements,)
+setup(
+ name='nevergrad',
+ version='0.1.0',
+ description='Gradient-free optimization toolbox',
+ author='Facebook AI Research',
+ packages=[
+ 'nevergrad',
+ 'nevergrad.benchmark',
+ 'nevergrad.benchmark.additional',
+ 'nevergrad.common',
+ 'nevergrad.functions',
+ 'nevergrad.instrumentation',
+ 'nevergrad.optimization',
+ ],
+ install_requires=requirements,
+)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,14 +7,22 @@\n # from distutils.core import setup\n from setuptools import setup\n \n-\n with open('requirements.txt') as f:\n requirements = f.read().splitlines()\n \n-\n-setup(name='nevergrad',\n- version='0.1.0',\n- description='Gradient-free optimization toolbox',\n- author='Facebook AI Research',\n- packages=['nevergrad'],\n- install_requires=requirements,)\n+setup(\n+ name='nevergrad',\n+ version='0.1.0',\n+ description='Gradient-free optimization toolbox',\n+ author='Facebook AI Research',\n+ packages=[\n+ 'nevergrad',\n+ 'nevergrad.benchmark',\n+ 'nevergrad.benchmark.additional',\n+ 'nevergrad.common',\n+ 'nevergrad.functions',\n+ 'nevergrad.instrumentation',\n+ 'nevergrad.optimization',\n+ ],\n+ install_requires=requirements,\n+)\n", "issue": "Hello-world install question\n## Steps to reproduce\r\n\r\nInstall nevergrad\r\n\r\n```\r\npip3 install -e [email protected]:facebookresearch/nevergrad@master#egg=nevergrad\r\nObtaining nevergrad from [email protected]:facebookresearch/nevergrad@master#egg=nevergrad\r\n Cloning [email protected]:facebookresearch/nevergrad (to revision master) to ./src/nevergrad\r\nWarning: Permanently added the RSA host key for IP address '192.30.255.112' to the list of known hosts.\r\[email protected]: Permission denied (publickey).\r\nfatal: Could not read from remote repository.\r\n\r\nPlease make sure you have the correct access rights\r\nand the repository exists.\r\nCommand \"git clone -q [email protected]:facebookresearch/nevergrad /Users/ME/Documents/workspace/temp/src/nevergrad\" failed with error code 128 in None\r\n```\r\n\r\nok. trying git clone and `python3 setup.py install` method. That seems to work.\r\n\r\nrun the sample program:\r\n\r\npython3 mynevergrad.py\r\n\r\n```\r\nfrom nevergrad.optimization import optimizerlib\r\n\r\ndef square(x):\r\n return (x - .5)**2\r\n\r\noptimizer = optimizerlib.OnePlusOne(dimension=1, budget=100, num_workers=5)\r\nrecommendation = optimizer.optimize(square, executor=None, batch_mode=True)\r\n```\r\n\r\n\r\n## Observed Results\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"mynevergrad.py\", line 6, in <module>\r\n from nevergrad.optimization import optimizerlib\r\nModuleNotFoundError: No module named 'nevergrad.optimization'\r\n```\r\n\r\n\r\n## Expected Results\r\n\r\nIt should run the sample\r\n\r\n## Relevant Code\r\n\r\n```\r\nimport pkg_resources\r\nfor d in pkg_resources.working_set:\r\n\tprint(d)\r\n```\r\nDOES include `nevergrad 0.1.0`\r\n\r\n\r\nThis is very likely an install, python3, homebrew \"installed in user directory\", or paths issue, but given that `nevergrad 0.1.0` shows up in the list, it is odd...\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# from distutils.core import setup\nfrom setuptools import setup\n\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\n\nsetup(name='nevergrad',\n version='0.1.0',\n description='Gradient-free optimization toolbox',\n author='Facebook AI Research',\n packages=['nevergrad'],\n install_requires=requirements,)\n", "path": "setup.py"}]} | 1,143 | 219 |
gh_patches_debug_38179 | rasdani/github-patches | git_diff | XanaduAI__strawberryfields-589 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Quantum neural net example output is NaN when using more than 1 mode
#### Issue description
The `quantum_neural_network.py` example outputs NaN when using more than 1 mode (it works as expected for 1 mode).
* *Expected behavior:* Not NaN
* *Actual behavior:* ```Beginning optimization
Rep: 0 Cost: 7.0005 Fidelity: 0.0000 Trace: 1.0000
Rep: 1 Cost: nan Fidelity: nan Trace: nan
Rep: 2 Cost: nan Fidelity: nan Trace: nan
Rep: 3 Cost: nan Fidelity: nan Trace: nan```
* *Reproduces how often:* 100%
* *System information:*
``` Strawberry Fields: a Python library for continuous-variable quantum circuits.
Copyright 2018-2020 Xanadu Quantum Technologies Inc.
Python version: 3.8.5
Platform info: Linux-5.8.0-53-generic-x86_64-with-glibc2.10
Installation path: /home/jonas/anaconda3/envs/strawberry/lib/python3.8/site-packages/strawberryfields
Strawberry Fields version: 0.17.0
Numpy version: 1.19.2
Scipy version: 1.4.1
SymPy version: 1.7.1
NetworkX version: 2.5
The Walrus version: 0.14.0
Blackbird version: 0.3.1-dev
TensorFlow version: 2.2.0
```
#### Source code and tracebacks
Update line 135 of the example `quantum_neural_network.py` from `modes = 1` to `modes = 2`.
</issue>
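One plausible reading of the failure (an inference from the patch below, not something stated in the report): with more than one mode the ket picks up entries that are numerically zero, and the gradient of `tf.abs` on a complex tensor divides by the magnitude, which becomes 0/0 at those entries once optimization starts. The fix shown further down introduces a `safe_abs` helper; a standalone sketch of that guard pattern, with assumed names, looks like this:
```
# Sketch: an absolute value whose gradient stays finite at exactly zero.
import tensorflow as tf

def safe_abs(x, eps=1e-15):
    # Flush numerically tiny entries to an exact zero first.
    x = tf.where(tf.abs(x) < eps, tf.zeros_like(x), x)
    nonzero = tf.not_equal(x, tf.constant(0, dtype=x.dtype))
    # Swap the zeros for ones before calling tf.abs so its gradient never
    # divides by zero, then mask those positions back to zero afterwards.
    safe_x = tf.where(nonzero, x, tf.ones_like(x))
    return tf.where(nonzero, tf.abs(safe_x), tf.zeros_like(tf.math.real(x)))
```
In the cost function, `tf.reduce_sum(safe_abs(ket - target_state))` then replaces the raw `tf.abs` difference.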
<code>
[start of examples/quantum_neural_network.py]
1 #!/usr/bin/env python3
2 import numpy as np
3 import tensorflow as tf
4 import strawberryfields as sf
5 from strawberryfields import ops
6
7
8 # =========================================================================
9 # Utility functions
10 # =========================================================================
11
12
13 # define interferometer
14 def interferometer(params, q):
15 """Parameterised interferometer acting on ``N`` modes.
16
17 Args:
18 params (list[float]): list of length ``max(1, N-1) + (N-1)*N`` parameters.
19
20 * The first ``N(N-1)/2`` parameters correspond to the beamsplitter angles
21 * The second ``N(N-1)/2`` parameters correspond to the beamsplitter phases
22 * The final ``N-1`` parameters correspond to local rotation on the first N-1 modes
23
24 q (list[RegRef]): list of Strawberry Fields quantum registers the interferometer
25 is to be applied to
26 """
27 N = len(q)
28 theta = params[:N*(N-1)//2]
29 phi = params[N*(N-1)//2:N*(N-1)]
30 rphi = params[-N+1:]
31
32 if N == 1:
33 # the interferometer is a single rotation
34 ops.Rgate(rphi[0]) | q[0]
35 return
36
37 n = 0 # keep track of free parameters
38
39 # Apply the rectangular beamsplitter array
40 # The array depth is N
41 for l in range(N):
42 for k, (q1, q2) in enumerate(zip(q[:-1], q[1:])):
43 # skip even or odd pairs depending on layer
44 if (l + k) % 2 != 1:
45 ops.BSgate(theta[n], phi[n]) | (q1, q2)
46 n += 1
47
48 # apply the final local phase shifts to all modes except the last one
49 for i in range(max(1, N - 1)):
50 ops.Rgate(rphi[i]) | q[i]
51 # Rgate only applied to first N - 1 modes
52
53
54 # define layer
55 def layer(params, q):
56 """CV quantum neural network layer acting on ``N`` modes.
57
58 Args:
59 params (list[float]): list of length ``2*(max(1, N-1) + N**2 + n)`` containing
60 the number of parameters for the layer
61 q (list[RegRef]): list of Strawberry Fields quantum registers the layer
62 is to be applied to
63 """
64 N = len(q)
65 M = int(N * (N - 1)) + max(1, N - 1)
66
67 int1 = params[:M]
68 s = params[M:M+N]
69 int2 = params[M+N:2*M+N]
70 dr = params[2*M+N:2*M+2*N]
71 dp = params[2*M+2*N:2*M+3*N]
72 k = params[2*M+3*N:2*M+4*N]
73
74 # begin layer
75 interferometer(int1, q)
76
77 for i in range(N):
78 ops.Sgate(s[i]) | q[i]
79
80 interferometer(int2, q)
81
82 for i in range(N):
83 ops.Dgate(dr[i], dp[i]) | q[i]
84 ops.Kgate(k[i]) | q[i]
85 # end layer
86
87
88 def init_weights(modes, layers, active_sd=0.0001, passive_sd=0.1):
89 """Initialize a 2D TensorFlow Variable containing normally-distributed
90 random weights for an ``N`` mode quantum neural network with ``L`` layers.
91
92 Args:
93 modes (int): the number of modes in the quantum neural network
94 layers (int): the number of layers in the quantum neural network
95 active_sd (float): the standard deviation used when initializing
96 the normally-distributed weights for the active parameters
97 (displacement, squeezing, and Kerr magnitude)
98 passive_sd (float): the standard deviation used when initializing
99 the normally-distributed weights for the passive parameters
100 (beamsplitter angles and all gate phases)
101
102 Returns:
103 tf.Variable[tf.float32]: A TensorFlow Variable of shape
104 ``[layers, 2*(max(1, modes-1) + modes**2 + modes)]``, where the Lth
105 row represents the layer parameters for the Lth layer.
106 """
107 # Number of interferometer parameters:
108 M = int(modes * (modes - 1)) + max(1, modes - 1)
109
110 # Create the TensorFlow variables
111 int1_weights = tf.random.normal(shape=[layers, M], stddev=passive_sd)
112 s_weights = tf.random.normal(shape=[layers, modes], stddev=active_sd)
113 int2_weights = tf.random.normal(shape=[layers, M], stddev=passive_sd)
114 dr_weights = tf.random.normal(shape=[layers, modes], stddev=active_sd)
115 dp_weights = tf.random.normal(shape=[layers, modes], stddev=passive_sd)
116 k_weights = tf.random.normal(shape=[layers, modes], stddev=active_sd)
117
118 weights = tf.concat([int1_weights, s_weights, int2_weights, dr_weights, dp_weights, k_weights], axis=1)
119 weights = tf.Variable(weights)
120
121 return weights
122
123
124 # =========================================================================
125 # Define the optimization problem
126 # =========================================================================
127
128
129 # set the random seed
130 tf.random.set_seed(137)
131 np.random.seed(137)
132
133
134 # define width and depth of CV quantum neural network
135 modes = 1
136 layers = 8
137 cutoff_dim = 6
138
139
140 # defining desired state (single photon state)
141 target_state = np.zeros(cutoff_dim)
142 target_state[1] = 1
143 target_state = tf.constant(target_state, dtype=tf.complex64)
144
145
146 # initialize engine and program
147 eng = sf.Engine(backend="tf", backend_options={"cutoff_dim": cutoff_dim})
148 qnn = sf.Program(modes)
149
150
151 # initialize QNN weights
152 weights = init_weights(modes, layers)
153 num_params = np.prod(weights.shape)
154
155
156 # Create array of Strawberry Fields symbolic gate arguments, matching
157 # the size of the weights Variable.
158 sf_params = np.arange(num_params).reshape(weights.shape).astype(np.str)
159 sf_params = np.array([qnn.params(*i) for i in sf_params])
160
161
162 # Construct the symbolic Strawberry Fields program by
163 # looping and applying layers to the program.
164 with qnn.context as q:
165 for k in range(layers):
166 layer(sf_params[k], q)
167
168
169 def cost(weights):
170 # Create a dictionary mapping from the names of the Strawberry Fields
171 # symbolic gate parameters to the TensorFlow weight values.
172 mapping = {p.name: w for p, w in zip(sf_params.flatten(), tf.reshape(weights, [-1]))}
173
174 # run the engine
175 state = eng.run(qnn, args=mapping).state
176 ket = state.ket()
177
178 difference = tf.reduce_sum(tf.abs(ket - target_state))
179 fidelity = tf.abs(tf.reduce_sum(tf.math.conj(ket) * target_state)) ** 2
180 return difference, fidelity, ket, tf.math.real(state.trace())
181
182
183 # set up optimizer
184 opt = tf.keras.optimizers.Adam()
185 cost_before, fidelity_before, _, _ = cost(weights)
186
187 print("Beginning optimization")
188
189
190 # Perform the optimization
191 for i in range(1000):
192 # reset the engine if it has already been executed
193 if eng.run_progs:
194 eng.reset()
195
196 with tf.GradientTape() as tape:
197 loss, fid, _, trace = cost(weights)
198
199 # one repetition of the optimization
200 gradients = tape.gradient(loss, weights)
201 opt.apply_gradients(zip([gradients], [weights]))
202
203 # Prints progress at every rep
204 if i % 1 == 0:
205 print("Rep: {} Cost: {:.4f} Fidelity: {:.4f} Trace: {:.4f}".format(i, loss, fid, trace))
206
207
208 cost_after, fidelity_after, ket_after, _ = cost(weights)
209
210
211 print("\nFidelity before optimization: ", fidelity_before.numpy())
212 print("Fidelity after optimization: ", fidelity_after.numpy())
213 print("\nTarget state: ", target_state.numpy())
214 print("Output state: ", np.round(ket_after.numpy(), decimals=3))
215
[end of examples/quantum_neural_network.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/quantum_neural_network.py b/examples/quantum_neural_network.py
--- a/examples/quantum_neural_network.py
+++ b/examples/quantum_neural_network.py
@@ -25,9 +25,9 @@
is to be applied to
"""
N = len(q)
- theta = params[:N*(N-1)//2]
- phi = params[N*(N-1)//2:N*(N-1)]
- rphi = params[-N+1:]
+ theta = params[: N * (N - 1) // 2]
+ phi = params[N * (N - 1) // 2 : N * (N - 1)]
+ rphi = params[-N + 1 :]
if N == 1:
# the interferometer is a single rotation
@@ -65,11 +65,11 @@
M = int(N * (N - 1)) + max(1, N - 1)
int1 = params[:M]
- s = params[M:M+N]
- int2 = params[M+N:2*M+N]
- dr = params[2*M+N:2*M+2*N]
- dp = params[2*M+2*N:2*M+3*N]
- k = params[2*M+3*N:2*M+4*N]
+ s = params[M : M + N]
+ int2 = params[M + N : 2 * M + N]
+ dr = params[2 * M + N : 2 * M + 2 * N]
+ dp = params[2 * M + 2 * N : 2 * M + 3 * N]
+ k = params[2 * M + 3 * N : 2 * M + 4 * N]
# begin layer
interferometer(int1, q)
@@ -115,7 +115,9 @@
dp_weights = tf.random.normal(shape=[layers, modes], stddev=passive_sd)
k_weights = tf.random.normal(shape=[layers, modes], stddev=active_sd)
- weights = tf.concat([int1_weights, s_weights, int2_weights, dr_weights, dp_weights, k_weights], axis=1)
+ weights = tf.concat(
+ [int1_weights, s_weights, int2_weights, dr_weights, dp_weights, k_weights], axis=1
+ )
weights = tf.Variable(weights)
return weights
@@ -166,6 +168,20 @@
layer(sf_params[k], q)
+def safe_abs(x):
+ # Helper function to deal with tensor terms near zero
+
+ # Check where we have near zero terms
+ EPS = 1e-15
+ x = tf.where(tf.abs(x) < EPS, tf.zeros_like(x), x)
+ zero = tf.constant(0, dtype=tf.complex64)
+ x_ok = tf.not_equal(x, zero)
+
+ # To make sure, swap out the zeros with ones
+ safe_x = tf.where(x_ok, x, tf.ones_like(x, dtype=tf.complex64))
+ return tf.where(x_ok, tf.abs(safe_x), tf.zeros_like(x, dtype=tf.float32))
+
+
def cost(weights):
# Create a dictionary mapping from the names of the Strawberry Fields
# symbolic gate parameters to the TensorFlow weight values.
@@ -175,7 +191,7 @@
state = eng.run(qnn, args=mapping).state
ket = state.ket()
- difference = tf.reduce_sum(tf.abs(ket - target_state))
+ difference = tf.reduce_sum(safe_abs(ket - target_state))
fidelity = tf.abs(tf.reduce_sum(tf.math.conj(ket) * target_state)) ** 2
return difference, fidelity, ket, tf.math.real(state.trace())
| {"golden_diff": "diff --git a/examples/quantum_neural_network.py b/examples/quantum_neural_network.py\n--- a/examples/quantum_neural_network.py\n+++ b/examples/quantum_neural_network.py\n@@ -25,9 +25,9 @@\n is to be applied to\n \"\"\"\n N = len(q)\n- theta = params[:N*(N-1)//2]\n- phi = params[N*(N-1)//2:N*(N-1)]\n- rphi = params[-N+1:]\n+ theta = params[: N * (N - 1) // 2]\n+ phi = params[N * (N - 1) // 2 : N * (N - 1)]\n+ rphi = params[-N + 1 :]\n \n if N == 1:\n # the interferometer is a single rotation\n@@ -65,11 +65,11 @@\n M = int(N * (N - 1)) + max(1, N - 1)\n \n int1 = params[:M]\n- s = params[M:M+N]\n- int2 = params[M+N:2*M+N]\n- dr = params[2*M+N:2*M+2*N]\n- dp = params[2*M+2*N:2*M+3*N]\n- k = params[2*M+3*N:2*M+4*N]\n+ s = params[M : M + N]\n+ int2 = params[M + N : 2 * M + N]\n+ dr = params[2 * M + N : 2 * M + 2 * N]\n+ dp = params[2 * M + 2 * N : 2 * M + 3 * N]\n+ k = params[2 * M + 3 * N : 2 * M + 4 * N]\n \n # begin layer\n interferometer(int1, q)\n@@ -115,7 +115,9 @@\n dp_weights = tf.random.normal(shape=[layers, modes], stddev=passive_sd)\n k_weights = tf.random.normal(shape=[layers, modes], stddev=active_sd)\n \n- weights = tf.concat([int1_weights, s_weights, int2_weights, dr_weights, dp_weights, k_weights], axis=1)\n+ weights = tf.concat(\n+ [int1_weights, s_weights, int2_weights, dr_weights, dp_weights, k_weights], axis=1\n+ )\n weights = tf.Variable(weights)\n \n return weights\n@@ -166,6 +168,20 @@\n layer(sf_params[k], q)\n \n \n+def safe_abs(x):\n+ # Helper function to deal with tensor terms near zero\n+\n+ # Check where we have near zero terms\n+ EPS = 1e-15\n+ x = tf.where(tf.abs(x) < EPS, tf.zeros_like(x), x)\n+ zero = tf.constant(0, dtype=tf.complex64)\n+ x_ok = tf.not_equal(x, zero)\n+\n+ # To make sure, swap out the zeros with ones\n+ safe_x = tf.where(x_ok, x, tf.ones_like(x, dtype=tf.complex64))\n+ return tf.where(x_ok, tf.abs(safe_x), tf.zeros_like(x, dtype=tf.float32))\n+\n+\n def cost(weights):\n # Create a dictionary mapping from the names of the Strawberry Fields\n # symbolic gate parameters to the TensorFlow weight values.\n@@ -175,7 +191,7 @@\n state = eng.run(qnn, args=mapping).state\n ket = state.ket()\n \n- difference = tf.reduce_sum(tf.abs(ket - target_state))\n+ difference = tf.reduce_sum(safe_abs(ket - target_state))\n fidelity = tf.abs(tf.reduce_sum(tf.math.conj(ket) * target_state)) ** 2\n return difference, fidelity, ket, tf.math.real(state.trace())\n", "issue": "quantum neural net example output is Nan when using more than 1 mode\n#### Issue description\r\n\r\nThe quantum_neural_network.py example output is Nan when using more than 1 mode. 
(working as expected for 1 mode)\r\n\r\n* *Expected behavior:* Not Nan\r\n\r\n* *Actual behavior:* ```Beginning optimization\r\nRep: 0 Cost: 7.0005 Fidelity: 0.0000 Trace: 1.0000\r\nRep: 1 Cost: nan Fidelity: nan Trace: nan\r\nRep: 2 Cost: nan Fidelity: nan Trace: nan\r\nRep: 3 Cost: nan Fidelity: nan Trace: nan```\r\n\r\n* *Reproduces how often:* 100%\r\n* *System information:* \r\n``` Strawberry Fields: a Python library for continuous-variable quantum circuits.\r\nCopyright 2018-2020 Xanadu Quantum Technologies Inc.\r\n\r\nPython version: 3.8.5\r\nPlatform info: Linux-5.8.0-53-generic-x86_64-with-glibc2.10\r\nInstallation path: /home/jonas/anaconda3/envs/strawberry/lib/python3.8/site-packages/strawberryfields\r\nStrawberry Fields version: 0.17.0\r\nNumpy version: 1.19.2\r\nScipy version: 1.4.1\r\nSymPy version: 1.7.1\r\nNetworkX version: 2.5\r\nThe Walrus version: 0.14.0\r\nBlackbird version: 0.3.1-dev\r\nTensorFlow version: 2.2.0\r\n```\r\n\r\n\r\n#### Source code and tracebacks\r\nupdate line 135 of the example quantum_neural_network.py from ```modes = 1``` to ```modes = 2``` \r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport numpy as np\nimport tensorflow as tf\nimport strawberryfields as sf\nfrom strawberryfields import ops\n\n\n# =========================================================================\n# Utility functions\n# =========================================================================\n\n\n# define interferometer\ndef interferometer(params, q):\n \"\"\"Parameterised interferometer acting on ``N`` modes.\n\n Args:\n params (list[float]): list of length ``max(1, N-1) + (N-1)*N`` parameters.\n\n * The first ``N(N-1)/2`` parameters correspond to the beamsplitter angles\n * The second ``N(N-1)/2`` parameters correspond to the beamsplitter phases\n * The final ``N-1`` parameters correspond to local rotation on the first N-1 modes\n\n q (list[RegRef]): list of Strawberry Fields quantum registers the interferometer\n is to be applied to\n \"\"\"\n N = len(q)\n theta = params[:N*(N-1)//2]\n phi = params[N*(N-1)//2:N*(N-1)]\n rphi = params[-N+1:]\n\n if N == 1:\n # the interferometer is a single rotation\n ops.Rgate(rphi[0]) | q[0]\n return\n\n n = 0 # keep track of free parameters\n\n # Apply the rectangular beamsplitter array\n # The array depth is N\n for l in range(N):\n for k, (q1, q2) in enumerate(zip(q[:-1], q[1:])):\n # skip even or odd pairs depending on layer\n if (l + k) % 2 != 1:\n ops.BSgate(theta[n], phi[n]) | (q1, q2)\n n += 1\n\n # apply the final local phase shifts to all modes except the last one\n for i in range(max(1, N - 1)):\n ops.Rgate(rphi[i]) | q[i]\n # Rgate only applied to first N - 1 modes\n\n\n# define layer\ndef layer(params, q):\n \"\"\"CV quantum neural network layer acting on ``N`` modes.\n\n Args:\n params (list[float]): list of length ``2*(max(1, N-1) + N**2 + n)`` containing\n the number of parameters for the layer\n q (list[RegRef]): list of Strawberry Fields quantum registers the layer\n is to be applied to\n \"\"\"\n N = len(q)\n M = int(N * (N - 1)) + max(1, N - 1)\n\n int1 = params[:M]\n s = params[M:M+N]\n int2 = params[M+N:2*M+N]\n dr = params[2*M+N:2*M+2*N]\n dp = params[2*M+2*N:2*M+3*N]\n k = params[2*M+3*N:2*M+4*N]\n\n # begin layer\n interferometer(int1, q)\n\n for i in range(N):\n ops.Sgate(s[i]) | q[i]\n\n interferometer(int2, q)\n\n for i in range(N):\n ops.Dgate(dr[i], dp[i]) | q[i]\n ops.Kgate(k[i]) | q[i]\n # end layer\n\n\ndef init_weights(modes, layers, active_sd=0.0001, passive_sd=0.1):\n 
\"\"\"Initialize a 2D TensorFlow Variable containing normally-distributed\n random weights for an ``N`` mode quantum neural network with ``L`` layers.\n\n Args:\n modes (int): the number of modes in the quantum neural network\n layers (int): the number of layers in the quantum neural network\n active_sd (float): the standard deviation used when initializing\n the normally-distributed weights for the active parameters\n (displacement, squeezing, and Kerr magnitude)\n passive_sd (float): the standard deviation used when initializing\n the normally-distributed weights for the passive parameters\n (beamsplitter angles and all gate phases)\n\n Returns:\n tf.Variable[tf.float32]: A TensorFlow Variable of shape\n ``[layers, 2*(max(1, modes-1) + modes**2 + modes)]``, where the Lth\n row represents the layer parameters for the Lth layer.\n \"\"\"\n # Number of interferometer parameters:\n M = int(modes * (modes - 1)) + max(1, modes - 1)\n\n # Create the TensorFlow variables\n int1_weights = tf.random.normal(shape=[layers, M], stddev=passive_sd)\n s_weights = tf.random.normal(shape=[layers, modes], stddev=active_sd)\n int2_weights = tf.random.normal(shape=[layers, M], stddev=passive_sd)\n dr_weights = tf.random.normal(shape=[layers, modes], stddev=active_sd)\n dp_weights = tf.random.normal(shape=[layers, modes], stddev=passive_sd)\n k_weights = tf.random.normal(shape=[layers, modes], stddev=active_sd)\n\n weights = tf.concat([int1_weights, s_weights, int2_weights, dr_weights, dp_weights, k_weights], axis=1)\n weights = tf.Variable(weights)\n\n return weights\n\n\n# =========================================================================\n# Define the optimization problem\n# =========================================================================\n\n\n# set the random seed\ntf.random.set_seed(137)\nnp.random.seed(137)\n\n\n# define width and depth of CV quantum neural network\nmodes = 1\nlayers = 8\ncutoff_dim = 6\n\n\n# defining desired state (single photon state)\ntarget_state = np.zeros(cutoff_dim)\ntarget_state[1] = 1\ntarget_state = tf.constant(target_state, dtype=tf.complex64)\n\n\n# initialize engine and program\neng = sf.Engine(backend=\"tf\", backend_options={\"cutoff_dim\": cutoff_dim})\nqnn = sf.Program(modes)\n\n\n# initialize QNN weights\nweights = init_weights(modes, layers)\nnum_params = np.prod(weights.shape)\n\n\n# Create array of Strawberry Fields symbolic gate arguments, matching\n# the size of the weights Variable.\nsf_params = np.arange(num_params).reshape(weights.shape).astype(np.str)\nsf_params = np.array([qnn.params(*i) for i in sf_params])\n\n\n# Construct the symbolic Strawberry Fields program by\n# looping and applying layers to the program.\nwith qnn.context as q:\n for k in range(layers):\n layer(sf_params[k], q)\n\n\ndef cost(weights):\n # Create a dictionary mapping from the names of the Strawberry Fields\n # symbolic gate parameters to the TensorFlow weight values.\n mapping = {p.name: w for p, w in zip(sf_params.flatten(), tf.reshape(weights, [-1]))}\n\n # run the engine\n state = eng.run(qnn, args=mapping).state\n ket = state.ket()\n\n difference = tf.reduce_sum(tf.abs(ket - target_state))\n fidelity = tf.abs(tf.reduce_sum(tf.math.conj(ket) * target_state)) ** 2\n return difference, fidelity, ket, tf.math.real(state.trace())\n\n\n# set up optimizer\nopt = tf.keras.optimizers.Adam()\ncost_before, fidelity_before, _, _ = cost(weights)\n\nprint(\"Beginning optimization\")\n\n\n# Perform the optimization\nfor i in range(1000):\n # reset the engine if it has already 
been executed\n if eng.run_progs:\n eng.reset()\n\n with tf.GradientTape() as tape:\n loss, fid, _, trace = cost(weights)\n\n # one repetition of the optimization\n gradients = tape.gradient(loss, weights)\n opt.apply_gradients(zip([gradients], [weights]))\n\n # Prints progress at every rep\n if i % 1 == 0:\n print(\"Rep: {} Cost: {:.4f} Fidelity: {:.4f} Trace: {:.4f}\".format(i, loss, fid, trace))\n\n\ncost_after, fidelity_after, ket_after, _ = cost(weights)\n\n\nprint(\"\\nFidelity before optimization: \", fidelity_before.numpy())\nprint(\"Fidelity after optimization: \", fidelity_after.numpy())\nprint(\"\\nTarget state: \", target_state.numpy())\nprint(\"Output state: \", np.round(ket_after.numpy(), decimals=3))\n", "path": "examples/quantum_neural_network.py"}]} | 3,286 | 865 |
gh_patches_debug_16505 | rasdani/github-patches | git_diff | conda__conda-build-2005 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`conda skeleton pypi pyinstrument` tries to index Anaconda.org
_From @olgabot on September 9, 2016 22:4_
I want to post my PyPI package to Anaconda.org and I'm following along with [this](http://conda.pydata.org/docs/build_tutorials/pkgs.html?highlight=anaconda%20upload) tutorial, which uses `pyinstrument` as an example, but I'm getting an error that seems to stem from a `conda index` command. This error happens no matter which PyPI package I use, even ones I own. Do you know what may be happening?
Side note: it would be helpful to know what to expect in a "skeleton"-created folder.
```
$ conda skeleton pypi pyinstrument
INFO:requests.packages.urllib3.connectionpool:Starting new HTTPS connection (1): pypi.python.org
Warning, the following versions were found for pyinstrument
0.10.1
0.11
0.12
0.13
0.13.1
Using 0.13.1
Use --version to specify a different version.
Using url https://pypi.python.org/packages/64/56/d7a0d48973dcf58ea74d5f004e16e94969e03ae783b46f86f42f35a6b81b/pyinstrument-0.13.1.tar.gz (44 KB) for pyinstrument.
Downloading pyinstrument
INFO:requests.packages.urllib3.connectionpool:Starting new HTTPS connection (1): pypi.python.org
INFO:fetch.start:('pyinstrument-0', 45395)
INFO:fetch.update:16384
INFO:fetch.update:32768
INFO:fetch.update:45395
INFO:fetch.update:45395
INFO:fetch.stop:None
Unpacking pyinstrument...
done
working in /var/folders/6l/83vj6nxn6g1b6l2c9ycym4hc0000gn/T/tmp9bdr5b9aconda_skeleton_pyinstrument-0.13.1.tar.gz
Error:
Indexing a copy of the Anaconda conda package channel is neither
necessary nor supported. If you wish to add your own packages,
you can do so by adding them to a separate channel.
```
Here's the output from `conda info`:
```
$ conda info
Warning: could not import binstar_client ('args' object has no attribute 'site')Current conda install:
platform : osx-64
conda version : 4.1.12
conda-env version : 2.5.2
conda-build version : 2.0.1
python version : 3.5.2.final.0
requests version : 2.10.0
root environment : /Users/olga/anaconda3 (writable)
default environment : /Users/olga/anaconda3
envs directories : /Users/olga/anaconda3/envs
package cache : /Users/olga/anaconda3/pkgs
channel URLs : https://conda.anaconda.org/r/osx-64/
https://conda.anaconda.org/r/noarch/
https://conda.anaconda.org/bioconda/osx-64/
https://conda.anaconda.org/bioconda/noarch/
https://repo.continuum.io/pkgs/free/osx-64/
https://repo.continuum.io/pkgs/free/noarch/
https://repo.continuum.io/pkgs/pro/osx-64/
https://repo.continuum.io/pkgs/pro/noarch/
config file : /Users/olga/.condarc
offline mode : False
is foreign system : False
```
_Copied from original issue: conda/conda#3406_
</issue>
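The error text in the log above is the hard-coded guard in `update_index` (lines 98-104 of `conda_build/index.py` below), which calls `sys.exit` as soon as any `_license-*.tar.bz2` file shows up in the directory being indexed. The patch at the end of this entry simply removes that guard; a softer alternative, sketched here only to illustrate the design choice (this is not the upstream fix), would be to warn and skip such files instead:
```
# Illustrative alternative inside update_index(): downgrade the hard exit to a warning.
license_files = {fn for fn in files if fn.startswith('_license-')}
if could_be_mirror and license_files:
    print("Warning: skipping %d _license-* package(s); indexing a mirror of the "
          "Anaconda channel is not supported." % len(license_files))
    files -= license_files
```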
<code>
[start of conda_build/index.py]
1 '''
2 Functions related to creating repodata index files.
3 '''
4
5 from __future__ import absolute_import, division, print_function
6
7 import os
8 import bz2
9 import sys
10 import json
11 import tarfile
12 from os.path import isfile, join, getmtime
13
14 from conda_build.utils import file_info, get_lock, try_acquire_locks
15 from .conda_interface import PY3, md5_file
16
17
18 def read_index_tar(tar_path, config, lock):
19 """ Returns the index.json dict inside the given package tarball. """
20 locks = []
21 if config.locking:
22 locks = [lock]
23 with try_acquire_locks(locks, config.timeout):
24 with tarfile.open(tar_path) as t:
25 try:
26 return json.loads(t.extractfile('info/index.json').read().decode('utf-8'))
27 except EOFError:
28 raise RuntimeError("Could not extract %s. File probably corrupt."
29 % tar_path)
30 except OSError as e:
31 raise RuntimeError("Could not extract %s (%s)" % (tar_path, e))
32 except tarfile.ReadError:
33 raise RuntimeError("Could not extract metadata from %s. "
34 "File probably corrupt." % tar_path)
35
36
37 def write_repodata(repodata, dir_path, lock, config=None):
38 """ Write updated repodata.json and repodata.json.bz2 """
39 if not config:
40 import conda_build.config
41 config = conda_build.config.config
42 locks = []
43 if config.locking:
44 locks = [lock]
45 with try_acquire_locks(locks, config.timeout):
46 data = json.dumps(repodata, indent=2, sort_keys=True)
47 # strip trailing whitespace
48 data = '\n'.join(line.rstrip() for line in data.splitlines())
49 # make sure we have newline at the end
50 if not data.endswith('\n'):
51 data += '\n'
52 with open(join(dir_path, 'repodata.json'), 'w') as fo:
53 fo.write(data)
54 with open(join(dir_path, 'repodata.json.bz2'), 'wb') as fo:
55 fo.write(bz2.compress(data.encode('utf-8')))
56
57
58 def update_index(dir_path, config, force=False, check_md5=False, remove=True, lock=None,
59 could_be_mirror=True):
60 """
61 Update all index files in dir_path with changed packages.
62
63 :param verbose: Should detailed status messages be output?
64 :type verbose: bool
65 :param force: Whether to re-index all packages (including those that
66 haven't changed) or not.
67 :type force: bool
68 :param check_md5: Whether to check MD5s instead of mtimes for determining
69 if a package changed.
70 :type check_md5: bool
71 """
72
73 if config.verbose:
74 print("updating index in:", dir_path)
75 index_path = join(dir_path, '.index.json')
76 if not os.path.isdir(dir_path):
77 os.makedirs(dir_path)
78
79 if not lock:
80 lock = get_lock(dir_path)
81
82 locks = []
83 if config.locking:
84 locks.append(lock)
85
86 with try_acquire_locks(locks, config.timeout):
87 if force:
88 index = {}
89 else:
90 try:
91 mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}
92 with open(index_path, **mode_dict) as fi:
93 index = json.load(fi)
94 except (IOError, ValueError):
95 index = {}
96
97 files = set(fn for fn in os.listdir(dir_path) if fn.endswith('.tar.bz2'))
98 if could_be_mirror and any(fn.startswith('_license-') for fn in files):
99 sys.exit("""\
100 Error:
101 Indexing a copy of the Anaconda conda package channel is neither
102 necessary nor supported. If you wish to add your own packages,
103 you can do so by adding them to a separate channel.
104 """)
105 for fn in files:
106 path = join(dir_path, fn)
107 if fn in index:
108 if check_md5:
109 if index[fn]['md5'] == md5_file(path):
110 continue
111 elif index[fn]['mtime'] == getmtime(path):
112 continue
113 if config.verbose:
114 print('updating:', fn)
115 d = read_index_tar(path, config, lock=lock)
116 d.update(file_info(path))
117 index[fn] = d
118
119 for fn in files:
120 index[fn]['sig'] = '.' if isfile(join(dir_path, fn + '.sig')) else None
121
122 if remove:
123 # remove files from the index which are not on disk
124 for fn in set(index) - files:
125 if config.verbose:
126 print("removing:", fn)
127 del index[fn]
128
129 # Deal with Python 2 and 3's different json module type reqs
130 mode_dict = {'mode': 'w', 'encoding': 'utf-8'} if PY3 else {'mode': 'wb'}
131 with open(index_path, **mode_dict) as fo:
132 json.dump(index, fo, indent=2, sort_keys=True, default=str)
133
134 # --- new repodata
135 for fn in index:
136 info = index[fn]
137 for varname in 'arch', 'platform', 'mtime', 'ucs':
138 try:
139 del info[varname]
140 except KeyError:
141 pass
142
143 if 'requires' in info and 'depends' not in info:
144 info['depends'] = info['requires']
145
146 repodata = {'packages': index, 'info': {}}
147 write_repodata(repodata, dir_path, lock=lock, config=config)
148
[end of conda_build/index.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_build/index.py b/conda_build/index.py
--- a/conda_build/index.py
+++ b/conda_build/index.py
@@ -6,7 +6,6 @@
import os
import bz2
-import sys
import json
import tarfile
from os.path import isfile, join, getmtime
@@ -95,13 +94,6 @@
index = {}
files = set(fn for fn in os.listdir(dir_path) if fn.endswith('.tar.bz2'))
- if could_be_mirror and any(fn.startswith('_license-') for fn in files):
- sys.exit("""\
- Error:
- Indexing a copy of the Anaconda conda package channel is neither
- necessary nor supported. If you wish to add your own packages,
- you can do so by adding them to a separate channel.
- """)
for fn in files:
path = join(dir_path, fn)
if fn in index:
| {"golden_diff": "diff --git a/conda_build/index.py b/conda_build/index.py\n--- a/conda_build/index.py\n+++ b/conda_build/index.py\n@@ -6,7 +6,6 @@\n \n import os\n import bz2\n-import sys\n import json\n import tarfile\n from os.path import isfile, join, getmtime\n@@ -95,13 +94,6 @@\n index = {}\n \n files = set(fn for fn in os.listdir(dir_path) if fn.endswith('.tar.bz2'))\n- if could_be_mirror and any(fn.startswith('_license-') for fn in files):\n- sys.exit(\"\"\"\\\n- Error:\n- Indexing a copy of the Anaconda conda package channel is neither\n- necessary nor supported. If you wish to add your own packages,\n- you can do so by adding them to a separate channel.\n- \"\"\")\n for fn in files:\n path = join(dir_path, fn)\n if fn in index:\n", "issue": "`conda skeleton pypi pyinstrument` tries to index Anaconda.org\n_From @olgabot on September 9, 2016 22:4_\n\nI want to post my PyPI package to Anaconda.org and I'm following along with [this](http://conda.pydata.org/docs/build_tutorials/pkgs.html?highlight=anaconda%20upload) tutorial which uses `pyinstrument` as an example, but I'm getting this error that seems to be stemming from a `conda index` command. This error happens no matter which PyPI package I use, even ones I own. Do you know what may be happening?\n\nSide note: It would be helpful to know what to expect to see in a \"skeleton\"-created folder.\n\n```\n$ conda skeleton pypi pyinstrument\nINFO:requests.packages.urllib3.connectionpool:Starting new HTTPS connection (1): pypi.python.org\nWarning, the following versions were found for pyinstrument\n0.10.1\n0.11\n0.12\n0.13\n0.13.1\nUsing 0.13.1\nUse --version to specify a different version.\nUsing url https://pypi.python.org/packages/64/56/d7a0d48973dcf58ea74d5f004e16e94969e03ae783b46f86f42f35a6b81b/pyinstrument-0.13.1.tar.gz (44 KB) for pyinstrument.\nDownloading pyinstrument\nINFO:requests.packages.urllib3.connectionpool:Starting new HTTPS connection (1): pypi.python.org\nINFO:fetch.start:('pyinstrument-0', 45395)\nINFO:fetch.update:16384\nINFO:fetch.update:32768\nINFO:fetch.update:45395\nINFO:fetch.update:45395\nINFO:fetch.stop:None\nUnpacking pyinstrument...\ndone\nworking in /var/folders/6l/83vj6nxn6g1b6l2c9ycym4hc0000gn/T/tmp9bdr5b9aconda_skeleton_pyinstrument-0.13.1.tar.gz\nError:\n Indexing a copy of the Anaconda conda package channel is neither\n necessary nor supported. 
If you wish to add your own packages,\n you can do so by adding them to a separate channel.\n```\n\nHere's the output from `conda info`:\n\n```\n$ conda info\nWarning: could not import binstar_client ('args' object has no attribute 'site')Current conda install:\n\n platform : osx-64\n conda version : 4.1.12\n conda-env version : 2.5.2\n conda-build version : 2.0.1\n python version : 3.5.2.final.0\n requests version : 2.10.0\n root environment : /Users/olga/anaconda3 (writable)\n default environment : /Users/olga/anaconda3\n envs directories : /Users/olga/anaconda3/envs\n package cache : /Users/olga/anaconda3/pkgs\n channel URLs : https://conda.anaconda.org/r/osx-64/\n https://conda.anaconda.org/r/noarch/\n https://conda.anaconda.org/bioconda/osx-64/\n https://conda.anaconda.org/bioconda/noarch/\n https://repo.continuum.io/pkgs/free/osx-64/\n https://repo.continuum.io/pkgs/free/noarch/\n https://repo.continuum.io/pkgs/pro/osx-64/\n https://repo.continuum.io/pkgs/pro/noarch/\n config file : /Users/olga/.condarc\n offline mode : False\n is foreign system : False\n```\n\n\n_Copied from original issue: conda/conda#3406_\n", "before_files": [{"content": "'''\nFunctions related to creating repodata index files.\n'''\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport bz2\nimport sys\nimport json\nimport tarfile\nfrom os.path import isfile, join, getmtime\n\nfrom conda_build.utils import file_info, get_lock, try_acquire_locks\nfrom .conda_interface import PY3, md5_file\n\n\ndef read_index_tar(tar_path, config, lock):\n \"\"\" Returns the index.json dict inside the given package tarball. \"\"\"\n locks = []\n if config.locking:\n locks = [lock]\n with try_acquire_locks(locks, config.timeout):\n with tarfile.open(tar_path) as t:\n try:\n return json.loads(t.extractfile('info/index.json').read().decode('utf-8'))\n except EOFError:\n raise RuntimeError(\"Could not extract %s. File probably corrupt.\"\n % tar_path)\n except OSError as e:\n raise RuntimeError(\"Could not extract %s (%s)\" % (tar_path, e))\n except tarfile.ReadError:\n raise RuntimeError(\"Could not extract metadata from %s. 
\"\n \"File probably corrupt.\" % tar_path)\n\n\ndef write_repodata(repodata, dir_path, lock, config=None):\n \"\"\" Write updated repodata.json and repodata.json.bz2 \"\"\"\n if not config:\n import conda_build.config\n config = conda_build.config.config\n locks = []\n if config.locking:\n locks = [lock]\n with try_acquire_locks(locks, config.timeout):\n data = json.dumps(repodata, indent=2, sort_keys=True)\n # strip trailing whitespace\n data = '\\n'.join(line.rstrip() for line in data.splitlines())\n # make sure we have newline at the end\n if not data.endswith('\\n'):\n data += '\\n'\n with open(join(dir_path, 'repodata.json'), 'w') as fo:\n fo.write(data)\n with open(join(dir_path, 'repodata.json.bz2'), 'wb') as fo:\n fo.write(bz2.compress(data.encode('utf-8')))\n\n\ndef update_index(dir_path, config, force=False, check_md5=False, remove=True, lock=None,\n could_be_mirror=True):\n \"\"\"\n Update all index files in dir_path with changed packages.\n\n :param verbose: Should detailed status messages be output?\n :type verbose: bool\n :param force: Whether to re-index all packages (including those that\n haven't changed) or not.\n :type force: bool\n :param check_md5: Whether to check MD5s instead of mtimes for determining\n if a package changed.\n :type check_md5: bool\n \"\"\"\n\n if config.verbose:\n print(\"updating index in:\", dir_path)\n index_path = join(dir_path, '.index.json')\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n if not lock:\n lock = get_lock(dir_path)\n\n locks = []\n if config.locking:\n locks.append(lock)\n\n with try_acquire_locks(locks, config.timeout):\n if force:\n index = {}\n else:\n try:\n mode_dict = {'mode': 'r', 'encoding': 'utf-8'} if PY3 else {'mode': 'rb'}\n with open(index_path, **mode_dict) as fi:\n index = json.load(fi)\n except (IOError, ValueError):\n index = {}\n\n files = set(fn for fn in os.listdir(dir_path) if fn.endswith('.tar.bz2'))\n if could_be_mirror and any(fn.startswith('_license-') for fn in files):\n sys.exit(\"\"\"\\\n Error:\n Indexing a copy of the Anaconda conda package channel is neither\n necessary nor supported. If you wish to add your own packages,\n you can do so by adding them to a separate channel.\n \"\"\")\n for fn in files:\n path = join(dir_path, fn)\n if fn in index:\n if check_md5:\n if index[fn]['md5'] == md5_file(path):\n continue\n elif index[fn]['mtime'] == getmtime(path):\n continue\n if config.verbose:\n print('updating:', fn)\n d = read_index_tar(path, config, lock=lock)\n d.update(file_info(path))\n index[fn] = d\n\n for fn in files:\n index[fn]['sig'] = '.' if isfile(join(dir_path, fn + '.sig')) else None\n\n if remove:\n # remove files from the index which are not on disk\n for fn in set(index) - files:\n if config.verbose:\n print(\"removing:\", fn)\n del index[fn]\n\n # Deal with Python 2 and 3's different json module type reqs\n mode_dict = {'mode': 'w', 'encoding': 'utf-8'} if PY3 else {'mode': 'wb'}\n with open(index_path, **mode_dict) as fo:\n json.dump(index, fo, indent=2, sort_keys=True, default=str)\n\n # --- new repodata\n for fn in index:\n info = index[fn]\n for varname in 'arch', 'platform', 'mtime', 'ucs':\n try:\n del info[varname]\n except KeyError:\n pass\n\n if 'requires' in info and 'depends' not in info:\n info['depends'] = info['requires']\n\n repodata = {'packages': index, 'info': {}}\n write_repodata(repodata, dir_path, lock=lock, config=config)\n", "path": "conda_build/index.py"}]} | 2,993 | 212 |
gh_patches_debug_22162 | rasdani/github-patches | git_diff | bids-standard__pybids-71 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Force UTF-8 encoding anywhere JSON files are read in
Per an issue on the mailing list, we should explicitly force UTF-8 encoding whenever we work with JSON files, because the BIDS spec already mandates UTF-8. Otherwise we risk failures on platforms where the default encoding is something else.
</issue>
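A minimal sketch of the kind of change being asked for (the helper name and the use of `io.open` for Python 2/3 compatibility are assumptions, not necessarily the merged fix). The relevant call site in the layout code below is the bare `json.load(open(json_file_path, "r"))` in `get_metadata`:
```
# Sketch: always read JSON sidecars as UTF-8 instead of the platform default.
import io
import json

def load_json(path):
    with io.open(path, 'r', encoding='utf-8') as f:
        return json.load(f)

# e.g. in get_metadata():
#     param_dict = load_json(json_file_path)
```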
<code>
[start of bids/grabbids/bids_layout.py]
1 import os
2 import re
3 import json
4
5 from os.path import dirname
6 from os.path import abspath
7 from os.path import join as pathjoin
8 from os.path import basename
9
10 from grabbit import Layout
11
12 __all__ = ['BIDSLayout']
13
14
15 class BIDSLayout(Layout):
16 def __init__(self, path, config=None, **kwargs):
17 if config is None:
18 root = dirname(abspath(__file__))
19 config = pathjoin(root, 'config', 'bids.json')
20 super(BIDSLayout, self).__init__(path, config,
21 dynamic_getters=True, **kwargs)
22
23 def _validate_file(self, f):
24 # Return False to exclude a file from indexing. This should call
25 # some kind of validation regex.
26 return True
27
28 def _get_nearest_helper(self, path, extension, type=None, **kwargs):
29 path = abspath(path)
30
31 if path not in self.files:
32 raise ValueError("File '%s' could not be found in the current BIDS"
33 " project." % path)
34
35 if not type:
36 # Constrain the search to .json files with the same type as target
37 type = self.files[path].entities['type']
38
39 tmp = self.get_nearest(path, extensions=extension, all_=True,
40 type=type, ignore_strict_entities=['type'],
41 **kwargs)
42
43 if len(tmp):
44 return tmp
45 else:
46 return None
47
48 def get_metadata(self, path, **kwargs):
49
50 potentialJSONs = self._get_nearest_helper(path, '.json', **kwargs)
51 if not isinstance(potentialJSONs, list): return potentialJSONs
52
53 merged_param_dict = {}
54 for json_file_path in reversed(potentialJSONs):
55 if os.path.exists(json_file_path):
56 param_dict = json.load(open(json_file_path, "r"))
57 merged_param_dict.update(param_dict)
58
59 return merged_param_dict
60
61 def get_bvec(self, path, **kwargs):
62 tmp = self._get_nearest_helper(path, 'bvec', type='dwi', **kwargs)[0]
63 if isinstance(tmp, list):
64 return tmp[0]
65 else:
66 return tmp
67
68 def get_bval(self, path, **kwargs):
69 tmp = self._get_nearest_helper(path, 'bval', type='dwi', **kwargs)[0]
70 if isinstance(tmp, list):
71 return tmp[0]
72 else:
73 return tmp
74
75 def get_events(self, path, **kwargs):
76 tmp = self._get_nearest_helper(path, '.tsv', type='events', **kwargs)
77 if isinstance(tmp, list):
78 return tmp[0]
79 else:
80 return tmp
81
82 def get_fieldmap(self, path, return_list=False):
83 fieldmaps = self._get_fieldmaps(path)
84
85 if return_list:
86 return fieldmaps
87 else:
88 if len(fieldmaps) == 1:
89 return fieldmaps[0]
90 elif len(fieldmaps) > 1:
91 raise ValueError("More than one fieldmap found, but the "
92 "'return_list' argument was set to False. "
93 "Either ensure that there is only one "
94 "fieldmap for this image, or set the "
95 "'return_list' argument to True and handle "
96 "the result as a list.")
97 else: # len(fieldmaps) == 0
98 return None
99
100 def _get_fieldmaps(self, path):
101 sub = os.path.split(path)[1].split("_")[0].split("sub-")[1]
102 fieldmap_set = []
103 type_ = '(phase1|phasediff|epi|fieldmap)'
104 for file in self.get(subject=sub, type=type_,
105 extensions=['nii.gz', 'nii']):
106 metadata = self.get_metadata(file.filename)
107 if metadata and "IntendedFor" in metadata.keys():
108 if isinstance(metadata["IntendedFor"], list):
109 intended_for = metadata["IntendedFor"]
110 else:
111 intended_for = [metadata["IntendedFor"]]
112 if any([path.endswith(suffix) for suffix in intended_for]):
113 cur_fieldmap = {}
114 if file.type == "phasediff":
115 cur_fieldmap = {"phasediff": file.filename,
116 "magnitude1": file.filename.replace(
117 "phasediff", "magnitude1"),
118 "magnitude2": file.filename.replace(
119 "phasediff", "magnitude2"),
120 "type": "phasediff"}
121 elif file.type == "phase1":
122 cur_fieldmap["phase1"] = file.filename
123 cur_fieldmap["magnitude1"] = \
124 file.filename.replace("phase1", "magnitude1")
125 cur_fieldmap["phase2"] = \
126 file.filename.replace("phase1", "phase2")
127 cur_fieldmap["magnitude2"] = \
128 file.filename.replace("phase1", "magnitude2")
129 cur_fieldmap["type"] = "phase"
130 elif file.type == "epi":
131 cur_fieldmap["epi"] = file.filename
132 cur_fieldmap["type"] = "epi"
133 elif file.type == "fieldmap":
134 cur_fieldmap["fieldmap"] = file.filename
135 cur_fieldmap["magnitude"] = \
136 file.filename.replace("fieldmap", "magnitude")
137 cur_fieldmap["type"] = "fieldmap"
138 fieldmap_set.append(cur_fieldmap)
139 return fieldmap_set
140
141 def find_match(self, target, source=None):
142
143 # Try to take the easy way out
144 if source is not None:
145 _target = source.split('.')[0] + '.' + target
146 if os.path.exists(_target):
147 return target
148
149 if target in list(self.entities.keys()):
150 candidates = list(self.entities[target].files.keys())
151 else:
152 candidates = []
153
154 for root, directories, filenames in os.walk(self.root):
155 for f in filenames:
156 if re.search(target + '$', f):
157 if os.path.sep == "\\":
158 f = f.replace("\\", "\\\\")
159 candidates.append(f)
160
161 if source is None:
162 return candidates
163
164 # Walk up the file hierarchy from source, find first match
165 if not os.path.exists(source):
166 raise OSError("The file '%s' doesn't exist." % source)
167 elif not source.startswith(self.root):
168 raise ValueError("The file '%s' is not contained "
169 "within the current project "
170 "directory (%s)." % (source, self.root))
171 rel = os.path.relpath(dirname(source), self.root)
172 sep = os.path.sep
173 chunks = rel.split(sep)
174 n_chunks = len(chunks)
175 for i in range(n_chunks, -1, -1):
176 path = pathjoin(self.root, *chunks[:i])
177 patt = path + '\%s[^\%s]+$' % (sep, sep)
178 if sep == "\\":
179 patt = path + '\\[^\\]+$'
180 patt = patt.replace("\\", "\\\\")
181 matches = [x for x in candidates if re.search(patt, x)]
182 if matches:
183 if len(matches) == 1:
184 return matches[0]
185 else:
186 raise ValueError("Ambiguous target: more than one "
187 "candidate file found in "
188 "directory '%s'." % path)
189 return None
190
[end of bids/grabbids/bids_layout.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bids/grabbids/bids_layout.py b/bids/grabbids/bids_layout.py
--- a/bids/grabbids/bids_layout.py
+++ b/bids/grabbids/bids_layout.py
@@ -5,7 +5,6 @@
from os.path import dirname
from os.path import abspath
from os.path import join as pathjoin
-from os.path import basename
from grabbit import Layout
@@ -48,12 +47,15 @@
def get_metadata(self, path, **kwargs):
potentialJSONs = self._get_nearest_helper(path, '.json', **kwargs)
- if not isinstance(potentialJSONs, list): return potentialJSONs
+
+ if not isinstance(potentialJSONs, list):
+ return potentialJSONs
merged_param_dict = {}
for json_file_path in reversed(potentialJSONs):
if os.path.exists(json_file_path):
- param_dict = json.load(open(json_file_path, "r"))
+ param_dict = json.load(open(json_file_path, "r",
+ encoding='utf-8'))
merged_param_dict.update(param_dict)
return merged_param_dict
| {"golden_diff": "diff --git a/bids/grabbids/bids_layout.py b/bids/grabbids/bids_layout.py\n--- a/bids/grabbids/bids_layout.py\n+++ b/bids/grabbids/bids_layout.py\n@@ -5,7 +5,6 @@\n from os.path import dirname\n from os.path import abspath\n from os.path import join as pathjoin\n-from os.path import basename\n \n from grabbit import Layout\n \n@@ -48,12 +47,15 @@\n def get_metadata(self, path, **kwargs):\n \n potentialJSONs = self._get_nearest_helper(path, '.json', **kwargs)\n- if not isinstance(potentialJSONs, list): return potentialJSONs\n+\n+ if not isinstance(potentialJSONs, list):\n+ return potentialJSONs\n \n merged_param_dict = {}\n for json_file_path in reversed(potentialJSONs):\n if os.path.exists(json_file_path):\n- param_dict = json.load(open(json_file_path, \"r\"))\n+ param_dict = json.load(open(json_file_path, \"r\",\n+ encoding='utf-8'))\n merged_param_dict.update(param_dict)\n \n return merged_param_dict\n", "issue": "Force UTF-8 encoding anywhere JSON files are read in\nPer an issue on the mailing list, we should explicitly force UTF-8 encoding whenever we work with JSON files, because the BIDS spec already mandates UTF-8 for spec compliance. Otherwise we risk failure on platforms where the default encoding is something else.\n", "before_files": [{"content": "import os\nimport re\nimport json\n\nfrom os.path import dirname\nfrom os.path import abspath\nfrom os.path import join as pathjoin\nfrom os.path import basename\n\nfrom grabbit import Layout\n\n__all__ = ['BIDSLayout']\n\n\nclass BIDSLayout(Layout):\n def __init__(self, path, config=None, **kwargs):\n if config is None:\n root = dirname(abspath(__file__))\n config = pathjoin(root, 'config', 'bids.json')\n super(BIDSLayout, self).__init__(path, config,\n dynamic_getters=True, **kwargs)\n\n def _validate_file(self, f):\n # Return False to exclude a file from indexing. 
This should call\n # some kind of validation regex.\n return True\n\n def _get_nearest_helper(self, path, extension, type=None, **kwargs):\n path = abspath(path)\n\n if path not in self.files:\n raise ValueError(\"File '%s' could not be found in the current BIDS\"\n \" project.\" % path)\n\n if not type:\n # Constrain the search to .json files with the same type as target\n type = self.files[path].entities['type']\n\n tmp = self.get_nearest(path, extensions=extension, all_=True,\n type=type, ignore_strict_entities=['type'],\n **kwargs)\n\n if len(tmp):\n return tmp\n else:\n return None\n\n def get_metadata(self, path, **kwargs):\n\n potentialJSONs = self._get_nearest_helper(path, '.json', **kwargs)\n if not isinstance(potentialJSONs, list): return potentialJSONs\n\n merged_param_dict = {}\n for json_file_path in reversed(potentialJSONs):\n if os.path.exists(json_file_path):\n param_dict = json.load(open(json_file_path, \"r\"))\n merged_param_dict.update(param_dict)\n\n return merged_param_dict\n\n def get_bvec(self, path, **kwargs):\n tmp = self._get_nearest_helper(path, 'bvec', type='dwi', **kwargs)[0]\n if isinstance(tmp, list):\n return tmp[0]\n else:\n return tmp\n\n def get_bval(self, path, **kwargs):\n tmp = self._get_nearest_helper(path, 'bval', type='dwi', **kwargs)[0]\n if isinstance(tmp, list):\n return tmp[0]\n else:\n return tmp\n\n def get_events(self, path, **kwargs):\n tmp = self._get_nearest_helper(path, '.tsv', type='events', **kwargs)\n if isinstance(tmp, list):\n return tmp[0]\n else:\n return tmp\n\n def get_fieldmap(self, path, return_list=False):\n fieldmaps = self._get_fieldmaps(path)\n\n if return_list:\n return fieldmaps\n else:\n if len(fieldmaps) == 1:\n return fieldmaps[0]\n elif len(fieldmaps) > 1:\n raise ValueError(\"More than one fieldmap found, but the \"\n \"'return_list' argument was set to False. 
\"\n \"Either ensure that there is only one \"\n \"fieldmap for this image, or set the \"\n \"'return_list' argument to True and handle \"\n \"the result as a list.\")\n else: # len(fieldmaps) == 0\n return None\n\n def _get_fieldmaps(self, path):\n sub = os.path.split(path)[1].split(\"_\")[0].split(\"sub-\")[1]\n fieldmap_set = []\n type_ = '(phase1|phasediff|epi|fieldmap)'\n for file in self.get(subject=sub, type=type_,\n extensions=['nii.gz', 'nii']):\n metadata = self.get_metadata(file.filename)\n if metadata and \"IntendedFor\" in metadata.keys():\n if isinstance(metadata[\"IntendedFor\"], list):\n intended_for = metadata[\"IntendedFor\"]\n else:\n intended_for = [metadata[\"IntendedFor\"]]\n if any([path.endswith(suffix) for suffix in intended_for]):\n cur_fieldmap = {}\n if file.type == \"phasediff\":\n cur_fieldmap = {\"phasediff\": file.filename,\n \"magnitude1\": file.filename.replace(\n \"phasediff\", \"magnitude1\"),\n \"magnitude2\": file.filename.replace(\n \"phasediff\", \"magnitude2\"),\n \"type\": \"phasediff\"}\n elif file.type == \"phase1\":\n cur_fieldmap[\"phase1\"] = file.filename\n cur_fieldmap[\"magnitude1\"] = \\\n file.filename.replace(\"phase1\", \"magnitude1\")\n cur_fieldmap[\"phase2\"] = \\\n file.filename.replace(\"phase1\", \"phase2\")\n cur_fieldmap[\"magnitude2\"] = \\\n file.filename.replace(\"phase1\", \"magnitude2\")\n cur_fieldmap[\"type\"] = \"phase\"\n elif file.type == \"epi\":\n cur_fieldmap[\"epi\"] = file.filename\n cur_fieldmap[\"type\"] = \"epi\"\n elif file.type == \"fieldmap\":\n cur_fieldmap[\"fieldmap\"] = file.filename\n cur_fieldmap[\"magnitude\"] = \\\n file.filename.replace(\"fieldmap\", \"magnitude\")\n cur_fieldmap[\"type\"] = \"fieldmap\"\n fieldmap_set.append(cur_fieldmap)\n return fieldmap_set\n\n def find_match(self, target, source=None):\n\n # Try to take the easy way out\n if source is not None:\n _target = source.split('.')[0] + '.' + target\n if os.path.exists(_target):\n return target\n\n if target in list(self.entities.keys()):\n candidates = list(self.entities[target].files.keys())\n else:\n candidates = []\n\n for root, directories, filenames in os.walk(self.root):\n for f in filenames:\n if re.search(target + '$', f):\n if os.path.sep == \"\\\\\":\n f = f.replace(\"\\\\\", \"\\\\\\\\\")\n candidates.append(f)\n\n if source is None:\n return candidates\n\n # Walk up the file hierarchy from source, find first match\n if not os.path.exists(source):\n raise OSError(\"The file '%s' doesn't exist.\" % source)\n elif not source.startswith(self.root):\n raise ValueError(\"The file '%s' is not contained \"\n \"within the current project \"\n \"directory (%s).\" % (source, self.root))\n rel = os.path.relpath(dirname(source), self.root)\n sep = os.path.sep\n chunks = rel.split(sep)\n n_chunks = len(chunks)\n for i in range(n_chunks, -1, -1):\n path = pathjoin(self.root, *chunks[:i])\n patt = path + '\\%s[^\\%s]+$' % (sep, sep)\n if sep == \"\\\\\":\n patt = path + '\\\\[^\\\\]+$'\n patt = patt.replace(\"\\\\\", \"\\\\\\\\\")\n matches = [x for x in candidates if re.search(patt, x)]\n if matches:\n if len(matches) == 1:\n return matches[0]\n else:\n raise ValueError(\"Ambiguous target: more than one \"\n \"candidate file found in \"\n \"directory '%s'.\" % path)\n return None\n", "path": "bids/grabbids/bids_layout.py"}]} | 2,643 | 257 |
gh_patches_debug_33030 | rasdani/github-patches | git_diff | pypa__cibuildwheel-1613 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
setup.py setup() not detected in __name__ == '__main__' block
### Description
My setup.py setup() includes:
python_requires=">=3.8"
However cibuildwheel still tries and fails to compile under Python 3.6.
I understand there is [CIBW_BUILD / CIBW_SKIP](https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip) but that is then duplicating the python requires information.
I can add a \[project\] section to pyproject.toml but that leads to a lot of problems because it ends up fighting with setup() parameters and they **really** don't like it.
I believe cibuildwheel should establish the Python version support automatically whether it comes from setuptools or pyproject.toml, and not try to build on unsupported versions. My [pyproject.toml](https://github.com/rogerbinns/apsw/blob/master/pyproject.toml) is:
````
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
````
### Build log
https://github.com/rogerbinns/apsw/actions/runs/6175182758/job/16761477543
### CI config
https://github.com/rogerbinns/apsw/actions/runs/6175182758/workflow
</issue>
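For illustration, a minimal setup.py of the shape the title describes (setup() called only inside the `__name__ == "__main__"` guard, with the `python_requires` value quoted in the issue) might look like the following sketch; it is hypothetical, not the reporter's actual file:

```python
# Hypothetical reproduction of the reported pattern, not the reporter's file:
# setup() only runs under the __main__ guard, so a static scan that expects a
# top-level setup() call never sees python_requires.
from setuptools import setup

if __name__ == "__main__":
    setup(
        name="example-package",  # placeholder name
        python_requires=">=3.8",
    )
```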
<code>
[start of cibuildwheel/projectfiles.py]
1 from __future__ import annotations
2
3 import ast
4 import configparser
5 import contextlib
6 from pathlib import Path
7
8 from ._compat import tomllib
9
10
11 class Analyzer(ast.NodeVisitor):
12 def __init__(self) -> None:
13 self.requires_python: str | None = None
14
15 def visit(self, node: ast.AST) -> None:
16 for inner_node in ast.walk(node):
17 for child in ast.iter_child_nodes(inner_node):
18 child.parent = inner_node # type: ignore[attr-defined]
19 super().visit(node)
20
21 def visit_keyword(self, node: ast.keyword) -> None:
22 self.generic_visit(node)
23 # Must not be nested in an if or other structure
24 # This will be Module -> Expr -> Call -> keyword
25 if (
26 node.arg == "python_requires"
27 and not hasattr(node.parent.parent.parent, "parent") # type: ignore[attr-defined]
28 and isinstance(node.value, ast.Constant)
29 ):
30 self.requires_python = node.value.value
31
32
33 def setup_py_python_requires(content: str) -> str | None:
34 try:
35 tree = ast.parse(content)
36 analyzer = Analyzer()
37 analyzer.visit(tree)
38 return analyzer.requires_python or None
39 except Exception: # pylint: disable=broad-except
40 return None
41
42
43 def get_requires_python_str(package_dir: Path) -> str | None:
44 """Return the python requires string from the most canonical source available, or None"""
45
46 # Read in from pyproject.toml:project.requires-python
47 with contextlib.suppress(FileNotFoundError):
48 with (package_dir / "pyproject.toml").open("rb") as f1:
49 info = tomllib.load(f1)
50 with contextlib.suppress(KeyError, IndexError, TypeError):
51 return str(info["project"]["requires-python"])
52
53 # Read in from setup.cfg:options.python_requires
54 config = configparser.ConfigParser()
55 with contextlib.suppress(FileNotFoundError):
56 config.read(package_dir / "setup.cfg")
57 with contextlib.suppress(KeyError, IndexError, TypeError):
58 return str(config["options"]["python_requires"])
59
60 setup_py = package_dir / "setup.py"
61 with contextlib.suppress(FileNotFoundError), setup_py.open(encoding="utf8") as f2:
62 return setup_py_python_requires(f2.read())
63
64 return None
65
[end of cibuildwheel/projectfiles.py]
</code>
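As a side note on the listing above: `visit_keyword` only records `python_requires` when the keyword's parent chain matches the Module -> Expr -> Call -> keyword shape named in its comment, so a `setup()` call wrapped in `if __name__ == "__main__":` adds an `If` level and is skipped. A quick ad-hoc check with `ast` (not part of the project) makes the extra level visible:

```python
# Ad-hoc illustration, not project code: the guarded call parses as
# Module -> If -> Expr -> Call -> keyword, one level deeper than the shape
# that visit_keyword accepts.
import ast

tree = ast.parse(
    'if __name__ == "__main__":\n'
    '    setup(python_requires=">=3.8")\n'
)
if_node = tree.body[0]                      # ast.If wrapping the call
call = if_node.body[0].value                # ast.Call for setup(...)
print(type(if_node).__name__, call.keywords[0].arg)  # prints: If python_requires
```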
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cibuildwheel/projectfiles.py b/cibuildwheel/projectfiles.py
--- a/cibuildwheel/projectfiles.py
+++ b/cibuildwheel/projectfiles.py
@@ -8,6 +8,43 @@
from ._compat import tomllib
+def get_parent(node: ast.AST | None, depth: int = 1) -> ast.AST | None:
+ for _ in range(depth):
+ node = getattr(node, "parent", None)
+ return node
+
+
+def is_main(parent: ast.AST | None) -> bool:
+ if parent is None:
+ return False
+
+ # This would be much nicer with 3.10's pattern matching!
+ if not isinstance(parent, ast.If):
+ return False
+ if not isinstance(parent.test, ast.Compare):
+ return False
+
+ try:
+ (op,) = parent.test.ops
+ (comp,) = parent.test.comparators
+ except ValueError:
+ return False
+
+ if not isinstance(op, ast.Eq):
+ return False
+
+ values = {comp, parent.test.left}
+
+ mains = {x for x in values if isinstance(x, ast.Constant) and x.value == "__main__"}
+ if len(mains) != 1:
+ return False
+ consts = {x for x in values if isinstance(x, ast.Name) and x.id == "__name__"}
+ if len(consts) != 1:
+ return False
+
+ return True
+
+
class Analyzer(ast.NodeVisitor):
def __init__(self) -> None:
self.requires_python: str | None = None
@@ -19,13 +56,22 @@
super().visit(node)
def visit_keyword(self, node: ast.keyword) -> None:
+ # Must not be nested except for if __name__ == "__main__"
+
self.generic_visit(node)
- # Must not be nested in an if or other structure
# This will be Module -> Expr -> Call -> keyword
+ parent = get_parent(node, 4)
+ unnested = parent is None
+
+ # This will be Module -> If -> Expr -> Call -> keyword
+ name_main_unnested = (
+ parent is not None and get_parent(parent) is None and is_main(get_parent(node, 3))
+ )
+
if (
node.arg == "python_requires"
- and not hasattr(node.parent.parent.parent, "parent") # type: ignore[attr-defined]
and isinstance(node.value, ast.Constant)
+ and (unnested or name_main_unnested)
):
self.requires_python = node.value.value
| {"golden_diff": "diff --git a/cibuildwheel/projectfiles.py b/cibuildwheel/projectfiles.py\n--- a/cibuildwheel/projectfiles.py\n+++ b/cibuildwheel/projectfiles.py\n@@ -8,6 +8,43 @@\n from ._compat import tomllib\n \n \n+def get_parent(node: ast.AST | None, depth: int = 1) -> ast.AST | None:\n+ for _ in range(depth):\n+ node = getattr(node, \"parent\", None)\n+ return node\n+\n+\n+def is_main(parent: ast.AST | None) -> bool:\n+ if parent is None:\n+ return False\n+\n+ # This would be much nicer with 3.10's pattern matching!\n+ if not isinstance(parent, ast.If):\n+ return False\n+ if not isinstance(parent.test, ast.Compare):\n+ return False\n+\n+ try:\n+ (op,) = parent.test.ops\n+ (comp,) = parent.test.comparators\n+ except ValueError:\n+ return False\n+\n+ if not isinstance(op, ast.Eq):\n+ return False\n+\n+ values = {comp, parent.test.left}\n+\n+ mains = {x for x in values if isinstance(x, ast.Constant) and x.value == \"__main__\"}\n+ if len(mains) != 1:\n+ return False\n+ consts = {x for x in values if isinstance(x, ast.Name) and x.id == \"__name__\"}\n+ if len(consts) != 1:\n+ return False\n+\n+ return True\n+\n+\n class Analyzer(ast.NodeVisitor):\n def __init__(self) -> None:\n self.requires_python: str | None = None\n@@ -19,13 +56,22 @@\n super().visit(node)\n \n def visit_keyword(self, node: ast.keyword) -> None:\n+ # Must not be nested except for if __name__ == \"__main__\"\n+\n self.generic_visit(node)\n- # Must not be nested in an if or other structure\n # This will be Module -> Expr -> Call -> keyword\n+ parent = get_parent(node, 4)\n+ unnested = parent is None\n+\n+ # This will be Module -> If -> Expr -> Call -> keyword\n+ name_main_unnested = (\n+ parent is not None and get_parent(parent) is None and is_main(get_parent(node, 3))\n+ )\n+\n if (\n node.arg == \"python_requires\"\n- and not hasattr(node.parent.parent.parent, \"parent\") # type: ignore[attr-defined]\n and isinstance(node.value, ast.Constant)\n+ and (unnested or name_main_unnested)\n ):\n self.requires_python = node.value.value\n", "issue": "setup.py setup() not detected in __name__ == '__main__' block\n### Description\n\nMy setup.py setup() includes:\r\n\r\n python_requires=\">=3.8\"\r\n\r\nHowever cibuildwheel still tries and fails to compile under Python 3.6.\r\n\r\nI understand there is [CIBW_BUILD / CIBW_SKIP](https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip) but that is then duplicating the python requires information.\r\n\r\nI can add a \\[project\\] section to pyproject.toml but that leads to a lot of problems because it ends up fighting with setup() parameters and they **really** don't like it.\r\n\r\nI believe cibuildwheel should establish the Python version support automatically whether it comes from setuptools or pyproject.toml, and not try to build on unsupported versions. 
My [pyproject.toml](https://github.com/rogerbinns/apsw/blob/master/pyproject.toml) is:\r\n\r\n````\r\n[build-system]\r\nrequires = [\"setuptools\"]\r\nbuild-backend = \"setuptools.build_meta\"\r\n````\r\n\n\n### Build log\n\nhttps://github.com/rogerbinns/apsw/actions/runs/6175182758/job/16761477543\n\n### CI config\n\nhttps://github.com/rogerbinns/apsw/actions/runs/6175182758/workflow\n", "before_files": [{"content": "from __future__ import annotations\n\nimport ast\nimport configparser\nimport contextlib\nfrom pathlib import Path\n\nfrom ._compat import tomllib\n\n\nclass Analyzer(ast.NodeVisitor):\n def __init__(self) -> None:\n self.requires_python: str | None = None\n\n def visit(self, node: ast.AST) -> None:\n for inner_node in ast.walk(node):\n for child in ast.iter_child_nodes(inner_node):\n child.parent = inner_node # type: ignore[attr-defined]\n super().visit(node)\n\n def visit_keyword(self, node: ast.keyword) -> None:\n self.generic_visit(node)\n # Must not be nested in an if or other structure\n # This will be Module -> Expr -> Call -> keyword\n if (\n node.arg == \"python_requires\"\n and not hasattr(node.parent.parent.parent, \"parent\") # type: ignore[attr-defined]\n and isinstance(node.value, ast.Constant)\n ):\n self.requires_python = node.value.value\n\n\ndef setup_py_python_requires(content: str) -> str | None:\n try:\n tree = ast.parse(content)\n analyzer = Analyzer()\n analyzer.visit(tree)\n return analyzer.requires_python or None\n except Exception: # pylint: disable=broad-except\n return None\n\n\ndef get_requires_python_str(package_dir: Path) -> str | None:\n \"\"\"Return the python requires string from the most canonical source available, or None\"\"\"\n\n # Read in from pyproject.toml:project.requires-python\n with contextlib.suppress(FileNotFoundError):\n with (package_dir / \"pyproject.toml\").open(\"rb\") as f1:\n info = tomllib.load(f1)\n with contextlib.suppress(KeyError, IndexError, TypeError):\n return str(info[\"project\"][\"requires-python\"])\n\n # Read in from setup.cfg:options.python_requires\n config = configparser.ConfigParser()\n with contextlib.suppress(FileNotFoundError):\n config.read(package_dir / \"setup.cfg\")\n with contextlib.suppress(KeyError, IndexError, TypeError):\n return str(config[\"options\"][\"python_requires\"])\n\n setup_py = package_dir / \"setup.py\"\n with contextlib.suppress(FileNotFoundError), setup_py.open(encoding=\"utf8\") as f2:\n return setup_py_python_requires(f2.read())\n\n return None\n", "path": "cibuildwheel/projectfiles.py"}]} | 1,458 | 596 |
gh_patches_debug_42711 | rasdani/github-patches | git_diff | sql-machine-learning__elasticdl-2463 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fail to fetch shard using multi-process in Python.
```python
class ImageDataset(IterableDataset):
def __init__(self, data_shard_service, shuffle=False):
self.data_shard_service = data_shard_service
self._shuffle = shuffle
def __iter__(self):
while True:
index = self.data_shard_service.fetch_shard()
image, label = read_images(index)
yield image, label
dataset = ImageDataset(
allreduce_controller.data_shard_service, shuffle=True
)
data_loader = DataLoader(
dataset=dataset,
batch_size=args.batch_size,
num_workers=2
)
```

</issue>
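Worth noting when reading the report above: with `num_workers=2`, each DataLoader worker runs in its own process, so the dataset and the shard service inside it are duplicated per worker. A common way to make shard fetching safe across worker processes, and the approach the patch for this record takes with its `RecordIndexService`, is to have a single background thread in the parent talk to the master and push record indices into a multiprocessing queue that the workers drain. A minimal sketch of that pattern, using illustrative names rather than the project's API:

```python
# Illustrative sketch only: names such as `producer` and `index_queue` are not
# part of elasticai_api; the real implementation is RecordIndexService in the
# patch for this record.
import threading
from multiprocessing import SimpleQueue

index_queue = SimpleQueue()

def producer(shard_service):
    # Runs in the parent process: fetch shards from the master and fan the
    # record indices out to a queue that forked DataLoader workers can read.
    while True:
        shard = shard_service.fetch_shard()
        if shard is None:
            break
        for i in range(shard.start, shard.end):
            index_queue.put(i)

# threading.Thread(target=producer, args=(data_shard_service,), daemon=True).start()
# In each worker's __iter__: index = index_queue.get()
```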
<code>
[start of elasticai_api/pytorch/controller.py]
1 # Copyright 2020 The ElasticDL Authors. All rights reserved.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 import os
15 import time
16 import traceback
17
18 from elasticai_api.common.base_controller import (
19 DEFAULT_MAX_ALLREDUCE_RETRY_NUM,
20 RETRY_ALLREDUCE_INTERVAL_SECS,
21 AllReduceController,
22 )
23 from elasticai_api.common.constants import WorkerEnv
24 from elasticai_api.common.data_shard_service import DataShardService
25 from elasticai_api.common.master_client import build_master_client
26 from elasticai_api.util.log_utils import default_logger as logger
27
28 try:
29 import horovod.torch as hvd
30 from horovod.common.exceptions import HorovodInternalError
31 from horovod.torch.functions import (
32 broadcast_optimizer_state,
33 broadcast_parameters,
34 broadcast_object,
35 )
36
37 except ImportError:
38 hvd = None
39
40
41 def create_elastic_controller(batch_size, num_epochs=None, dataset_size=None):
42 """Create an elastic AllReduce controller with data shard service.
43 Users can use the `controller.data_shard_service` to get data
44 shards like:
45 ```python
46 while True:
47 shard = controller.data_shard_service.fetch_shard()
48 for i in range(shard.start, shard.end):
49 yield i
50 ```
51
52 Users also can use the controller to do an elastic training.
53
54 ```python
55 model = ...
56 optimizer = optim.SGD(model.parameters(), lr=0.1)
57 optimizer = hvd.DistributedOptimizer(optimizer)
58
59 controller.set_broadcast_model(model)
60     controller.set_broadcast_optimizer(optimizer)
61 model.train()
62 for batch_idx, (data, target) in enumerate(data_loader):
63 # Use the elastic function to wrap the training function with a batch.
64 elastic_train_one_batch = allreduce_controller.elastic_run(
65 train_one_batch
66 )
67
68 def train_one_batch(model, optimizer, data, target):
69 optimizer.zero_grad()
70 output = model(data)
71 loss = F.nll_loss(output, target)
72 loss.backward()
73 optimizer.step()
74 return loss
75 ```
76
77 Args:
78 batch_size: The batch size of a single worker.
79 num_epochs: The number of epochs.
80 dataset_size: The total size of dataset.
81 """
82 master_client = build_master_client()
83 data_shard_service = DataShardService(
84 master_client, batch_size, num_epochs, dataset_size
85 )
86
87 controller = PyTorchAllReduceController(master_client, data_shard_service)
88 controller.init_horovod_locally()
89 return controller
90
91
92 class PyTorchAllReduceController(AllReduceController):
93 def __init__(self, master_client, data_shard_service):
94 super(PyTorchAllReduceController, self).__init__(
95 master_client, data_shard_service
96 )
97 self._model = None
98 self._optimizer = None
99 self.backward_passes_per_step = 1
100 # ElasticDL master should set the number of workers into envs.
101 self.global_batch_num_per_step = int(
102 os.getenv(WorkerEnv.WORKER_NUM, 1)
103 )
104 self.global_completed_batch_num = 0
105
106 def set_broadcast_model(self, model):
107 self._model = model
108
109 def set_broadcast_optimizer(self, optimizer):
110 self._optimizer = optimizer
111
112 def broadcast(self):
113 broadcast_parameters(self._model.state_dict(), root_rank=0)
114 broadcast_optimizer_state(self._optimizer, root_rank=0)
115 self.global_completed_batch_num = broadcast_object(
116 self.global_completed_batch_num, name="GlobalCompletedBatchNum"
117 )
118
119 def train_one_batch_with_retries(self, func, *args, **kwargs):
120 self.reset_backward_passes_per_step()
121 allreduce_success = False
122 for _ in range(DEFAULT_MAX_ALLREDUCE_RETRY_NUM):
123 try:
124 self._broadcast_if_needed()
125 result = func(*args, **kwargs)
126 allreduce_success = True
127 break
128 except HorovodInternalError:
129 logger.warning(
130 "Failed to perform allreduce operation on "
131 "the gradients. Retrying..."
132 )
133 # Those error message show that the communication
134 # to merge gradient fails and we can rebuild the
135 # communication.
136 self.restore()
137 except RuntimeError:
138 traceback.print_exc()
139 self.restore()
140 if not allreduce_success:
141 raise RuntimeError("Failed to perform allreduce.")
142 self._update_completed_minibatches()
143 return result
144
145 def restore(self):
146 time.sleep(RETRY_ALLREDUCE_INTERVAL_SECS)
147 # Call `load_state_dict` to reset the state of Horovod optimizer
148 self._optimizer.load_state_dict(self._optimizer.state_dict())
149 self._optimizer.zero_grad()
150 self._rendezvous_manager.init_horovod_if_needed()
151
152 def _update_completed_minibatches(self):
153 if (
154 hasattr(self._optimizer, "fixed_global_batch_size")
155 and self._optimizer.fixed_global_batch_size
156 ):
157 if self._optimizer.update_gradients:
158 self.global_completed_batch_num += (
159 self.global_batch_num_per_step
160 )
161 else:
162 self.global_completed_batch_num += hvd.size()
163
164 def reset_backward_passes_per_step(self):
165 # Only reset backward_passes_per_step when using the optimizer
166 # with fixed_global_batch_size
167 if (
168 hasattr(self._optimizer, "fixed_global_batch_size")
169 and self._optimizer.fixed_global_batch_size
170 ):
171 world_size = hvd.size()
172 rank = hvd.rank()
173 self.backward_passes_per_step = int(
174 self.global_batch_num_per_step / world_size
175 )
176 if rank < self.global_batch_num_per_step % world_size:
177 self.backward_passes_per_step += 1
178 if (
179 self.backward_passes_per_step
180 != self._optimizer.backward_passes_per_step
181 ):
182 self._optimizer.set_backward_passes_per_step(
183 self.backward_passes_per_step
184 )
185 logger.info(
186 "Backward passes per step = {}".format(
187 self._optimizer.backward_passes_per_step
188 )
189 )
190
[end of elasticai_api/pytorch/controller.py]
[start of elasticai_api/common/data_shard_service.py]
1 # Copyright 2020 The ElasticDL Authors. All rights reserved.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 import threading
15 from collections import deque
16
17 from elasticai_api.common.constants import TaskExecCounterKey
18 from elasticai_api.common.master_client import build_master_client
19 from elasticai_api.proto import elasticai_api_pb2
20
21
22 def build_data_shard_service(
23 batch_size,
24 num_epochs=None,
25 dataset_size=None,
26 task_type=elasticai_api_pb2.TRAINING,
27 ):
28 master_client = build_master_client()
29 return DataShardService(
30 batch_size=batch_size,
31 master_client=master_client,
32 num_epochs=num_epochs,
33 dataset_size=dataset_size,
34 task_type=task_type,
35 )
36
37
38 class DataShardService(object):
39 def __init__(
40 self,
41 master_client,
42 batch_size,
43 num_epochs=None,
44 dataset_size=None,
45 task_type=elasticai_api_pb2.TRAINING,
46 ):
47 self._mc = master_client
48 self._batch_size = batch_size
49 self._num_epochs = num_epochs
50 self._dataset_size = dataset_size
51 self._task_type = task_type
52 self._lock = threading.Lock()
53 self._failed_record_count = 0
54 self._reported_record_count = 0
55 self._current_task = None
56 self._pending_tasks = deque()
57 self._report_training_params()
58
59 def _report_training_params(self):
60 if self._num_epochs and self._dataset_size:
61 self._mc.report_training_params(
62 self._batch_size, self._num_epochs, self._dataset_size
63 )
64
65 def get_current_task(self):
66 return self._current_task
67
68 def get_task(self, task_type=None):
69 task = self._mc.get_task(task_type)
70 if task.type == self._task_type:
71 with self._lock:
72 self._pending_tasks.append(task)
73 if len(self._pending_tasks) == 1:
74 self._current_task = task
75
76 return task
77
78 def _report_task(self, task, err_msg=""):
79 if self._failed_record_count != 0:
80 exec_counters = {
81 TaskExecCounterKey.FAIL_COUNT: self._failed_record_count
82 }
83 else:
84 exec_counters = None
85 self._mc.report_task_result(
86 task.task_id, err_msg, exec_counters=exec_counters
87 )
88
89 def report_batch_done(self, batch_size=None, err_msg=""):
90 """
91 Report the number of records in the latest processed batch,
92 so DynamicShardingManager knows if some pending tasks are finished
93 and report_task_result to the master.
94 Return True if there are some finished tasks, False otherwise.
95 """
96 record_count = batch_size if batch_size else self._batch_size
97 self._reported_record_count += record_count
98 if err_msg:
99 self._failed_record_count += record_count
100
101 if not self._pending_tasks:
102 return False
103 task = self._pending_tasks[0]
104 total_record_num = task.shard.end - task.shard.start
105 if self._reported_record_count >= total_record_num:
106 # Keep popping tasks until the reported record count is less
107 # than the size of the current data since `batch_size` may be
108 # larger than `shard.end - shard.start`
109 with self._lock:
110 while (
111 self._pending_tasks
112 and self._reported_record_count
113 >= self._pending_tasks[0].shard.end
114 - self._pending_tasks[0].shard.start
115 ):
116 self._reported_record_count -= (
117 self._pending_tasks[0].shard.end
118 - self._pending_tasks[0].shard.start
119 )
120 task = self._pending_tasks.popleft()
121 self._report_task(task, err_msg)
122 self._failed_record_count = 0
123 if self._pending_tasks:
124 self._current_task = self._pending_tasks[0]
125 return True
126 return False
127
128 def fetch_shard(self):
129 """Fetch data shard and each shard contains the name,
130 start and end index.
131 """
132 task = self.get_task(self._task_type)
133 if task.type != self._task_type:
134 return None
135
136 return task.shard
137
[end of elasticai_api/common/data_shard_service.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticai_api/common/data_shard_service.py b/elasticai_api/common/data_shard_service.py
--- a/elasticai_api/common/data_shard_service.py
+++ b/elasticai_api/common/data_shard_service.py
@@ -11,8 +11,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import random
import threading
+import time
from collections import deque
+from multiprocessing import SimpleQueue
from elasticai_api.common.constants import TaskExecCounterKey
from elasticai_api.common.master_client import build_master_client
@@ -134,3 +137,51 @@
return None
return task.shard
+
+
+class RecordIndexService(DataShardService):
+ def __init__(
+ self,
+ master_client,
+ batch_size,
+ num_epochs=None,
+ dataset_size=None,
+ task_type=elasticai_api_pb2.TRAINING,
+ shuffle=False,
+ ):
+ super(RecordIndexService, self).__init__(
+ master_client, batch_size, num_epochs, dataset_size, task_type
+ )
+ self._shuffle = shuffle
+ self._shard_queue = SimpleQueue()
+ threading.Thread(
+ target=self._get_shard_indices,
+ name="fetch_shard_indices",
+ daemon=True,
+ ).start()
+
+ def _get_shard_indices(self):
+ while True:
+ if self._shard_queue.empty():
+ task = self.get_task(self._task_type)
+ if not task.shard or task.type != self._task_type:
+ break
+ ids = list(range(task.shard.start, task.shard.end))
+ if self._shuffle:
+ random.shuffle(ids)
+ for i in ids:
+ self._shard_queue.put(i)
+ else:
+ time.sleep(1)
+
+ def fetch_record_index(self):
+ """Fetch an index of the record. The function get an index
+ from a queue because there may be multiple sub-process to call
+ the function.
+ """
+ for _ in range(30):
+ if not self._shard_queue.empty():
+ return self._shard_queue.get()
+ else:
+ time.sleep(1)
+ raise StopIteration
diff --git a/elasticai_api/pytorch/controller.py b/elasticai_api/pytorch/controller.py
--- a/elasticai_api/pytorch/controller.py
+++ b/elasticai_api/pytorch/controller.py
@@ -21,7 +21,7 @@
AllReduceController,
)
from elasticai_api.common.constants import WorkerEnv
-from elasticai_api.common.data_shard_service import DataShardService
+from elasticai_api.common.data_shard_service import RecordIndexService
from elasticai_api.common.master_client import build_master_client
from elasticai_api.util.log_utils import default_logger as logger
@@ -38,15 +38,14 @@
hvd = None
-def create_elastic_controller(batch_size, num_epochs=None, dataset_size=None):
- """Create an elastic AllReduce controller with data shard service.
+def create_elastic_controller(
+ batch_size, num_epochs=None, dataset_size=None, shuffle=False
+):
+ """Create an elastic AllReduce controller with record index service.
Users can use the `controller.data_shard_service` to get data
shards like:
```python
- while True:
- shard = controller.data_shard_service.fetch_shard()
- for i in range(shard.start, shard.end):
- yield i
+ index = controller.data_shard_service.fetch_record_index()
```
Users also can use the controller to do an elastic training.
@@ -80,11 +79,17 @@
dataset_size: The total size of dataset.
"""
master_client = build_master_client()
- data_shard_service = DataShardService(
- master_client, batch_size, num_epochs, dataset_size
+ record_index_service = RecordIndexService(
+ master_client=master_client,
+ batch_size=batch_size,
+ num_epochs=num_epochs,
+ dataset_size=dataset_size,
+ shuffle=shuffle,
)
- controller = PyTorchAllReduceController(master_client, data_shard_service)
+ controller = PyTorchAllReduceController(
+ master_client, record_index_service
+ )
controller.init_horovod_locally()
return controller
| {"golden_diff": "diff --git a/elasticai_api/common/data_shard_service.py b/elasticai_api/common/data_shard_service.py\n--- a/elasticai_api/common/data_shard_service.py\n+++ b/elasticai_api/common/data_shard_service.py\n@@ -11,8 +11,11 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import random\n import threading\n+import time\n from collections import deque\n+from multiprocessing import SimpleQueue\n \n from elasticai_api.common.constants import TaskExecCounterKey\n from elasticai_api.common.master_client import build_master_client\n@@ -134,3 +137,51 @@\n return None\n \n return task.shard\n+\n+\n+class RecordIndexService(DataShardService):\n+ def __init__(\n+ self,\n+ master_client,\n+ batch_size,\n+ num_epochs=None,\n+ dataset_size=None,\n+ task_type=elasticai_api_pb2.TRAINING,\n+ shuffle=False,\n+ ):\n+ super(RecordIndexService, self).__init__(\n+ master_client, batch_size, num_epochs, dataset_size, task_type\n+ )\n+ self._shuffle = shuffle\n+ self._shard_queue = SimpleQueue()\n+ threading.Thread(\n+ target=self._get_shard_indices,\n+ name=\"fetch_shard_indices\",\n+ daemon=True,\n+ ).start()\n+\n+ def _get_shard_indices(self):\n+ while True:\n+ if self._shard_queue.empty():\n+ task = self.get_task(self._task_type)\n+ if not task.shard or task.type != self._task_type:\n+ break\n+ ids = list(range(task.shard.start, task.shard.end))\n+ if self._shuffle:\n+ random.shuffle(ids)\n+ for i in ids:\n+ self._shard_queue.put(i)\n+ else:\n+ time.sleep(1)\n+\n+ def fetch_record_index(self):\n+ \"\"\"Fetch an index of the record. The function get an index\n+ from a queue because there may be multiple sub-process to call\n+ the function.\n+ \"\"\"\n+ for _ in range(30):\n+ if not self._shard_queue.empty():\n+ return self._shard_queue.get()\n+ else:\n+ time.sleep(1)\n+ raise StopIteration\ndiff --git a/elasticai_api/pytorch/controller.py b/elasticai_api/pytorch/controller.py\n--- a/elasticai_api/pytorch/controller.py\n+++ b/elasticai_api/pytorch/controller.py\n@@ -21,7 +21,7 @@\n AllReduceController,\n )\n from elasticai_api.common.constants import WorkerEnv\n-from elasticai_api.common.data_shard_service import DataShardService\n+from elasticai_api.common.data_shard_service import RecordIndexService\n from elasticai_api.common.master_client import build_master_client\n from elasticai_api.util.log_utils import default_logger as logger\n \n@@ -38,15 +38,14 @@\n hvd = None\n \n \n-def create_elastic_controller(batch_size, num_epochs=None, dataset_size=None):\n- \"\"\"Create an elastic AllReduce controller with data shard service.\n+def create_elastic_controller(\n+ batch_size, num_epochs=None, dataset_size=None, shuffle=False\n+):\n+ \"\"\"Create an elastic AllReduce controller with record index service.\n Users can use the `controller.data_shard_service` to get data\n shards like:\n ```python\n- while True:\n- shard = controller.data_shard_service.fetch_shard()\n- for i in range(shard.start, shard.end):\n- yield i\n+ index = controller.data_shard_service.fetch_record_index()\n ```\n \n Users also can use the controller to do an elastic training.\n@@ -80,11 +79,17 @@\n dataset_size: The total size of dataset.\n \"\"\"\n master_client = build_master_client()\n- data_shard_service = DataShardService(\n- master_client, batch_size, num_epochs, dataset_size\n+ record_index_service = RecordIndexService(\n+ master_client=master_client,\n+ batch_size=batch_size,\n+ num_epochs=num_epochs,\n+ dataset_size=dataset_size,\n+ shuffle=shuffle,\n )\n 
\n- controller = PyTorchAllReduceController(master_client, data_shard_service)\n+ controller = PyTorchAllReduceController(\n+ master_client, record_index_service\n+ )\n controller.init_horovod_locally()\n return controller\n", "issue": "Fail to fetch shard using multi-process in Python.\n```python\r\nclass ImageDataset(IterableDataset):\r\n def __init__(self, data_shard_service, shuffle=False):\r\n self.data_shard_service = data_shard_service\r\n self._shuffle = shuffle\r\n\r\n def __iter__(self):\r\n while True:\r\n index = self.data_shard_service.fetch_shard()\r\n image, label = read_images(index)\r\n yield image, label\r\n\r\ndataset = ImageDataset(\r\n allreduce_controller.data_shard_service, shuffle=True\r\n )\r\n\r\ndata_loader = DataLoader(\r\n dataset=dataset,\r\n batch_size=args.batch_size,\r\n num_workers=2\r\n )\r\n```\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport time\nimport traceback\n\nfrom elasticai_api.common.base_controller import (\n DEFAULT_MAX_ALLREDUCE_RETRY_NUM,\n RETRY_ALLREDUCE_INTERVAL_SECS,\n AllReduceController,\n)\nfrom elasticai_api.common.constants import WorkerEnv\nfrom elasticai_api.common.data_shard_service import DataShardService\nfrom elasticai_api.common.master_client import build_master_client\nfrom elasticai_api.util.log_utils import default_logger as logger\n\ntry:\n import horovod.torch as hvd\n from horovod.common.exceptions import HorovodInternalError\n from horovod.torch.functions import (\n broadcast_optimizer_state,\n broadcast_parameters,\n broadcast_object,\n )\n\nexcept ImportError:\n hvd = None\n\n\ndef create_elastic_controller(batch_size, num_epochs=None, dataset_size=None):\n \"\"\"Create an elastic AllReduce controller with data shard service.\n Users can use the `controller.data_shard_service` to get data\n shards like:\n ```python\n while True:\n shard = controller.data_shard_service.fetch_shard()\n for i in range(shard.start, shard.end):\n yield i\n ```\n\n Users also can use the controller to do an elastic training.\n\n ```python\n model = ...\n optimizer = optim.SGD(model.parameters(), lr=0.1)\n optimizer = hvd.DistributedOptimizer(optimizer)\n\n controller.set_broadcast_model(model)\n ontroller.set_broadcast_optimizer(optimizer)\n model.train()\n for batch_idx, (data, target) in enumerate(data_loader):\n # Use the elastic function to wrap the training function with a batch.\n elastic_train_one_batch = allreduce_controller.elastic_run(\n train_one_batch\n )\n\n def train_one_batch(model, optimizer, data, target):\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n loss.backward()\n optimizer.step()\n return loss\n ```\n\n Args:\n batch_size: The batch size of a single worker.\n num_epochs: The number of epochs.\n dataset_size: The total size of dataset.\n \"\"\"\n master_client = build_master_client()\n data_shard_service = DataShardService(\n master_client, batch_size, 
num_epochs, dataset_size\n )\n\n controller = PyTorchAllReduceController(master_client, data_shard_service)\n controller.init_horovod_locally()\n return controller\n\n\nclass PyTorchAllReduceController(AllReduceController):\n def __init__(self, master_client, data_shard_service):\n super(PyTorchAllReduceController, self).__init__(\n master_client, data_shard_service\n )\n self._model = None\n self._optimizer = None\n self.backward_passes_per_step = 1\n # ElasticDL master should set the number of workers into envs.\n self.global_batch_num_per_step = int(\n os.getenv(WorkerEnv.WORKER_NUM, 1)\n )\n self.global_completed_batch_num = 0\n\n def set_broadcast_model(self, model):\n self._model = model\n\n def set_broadcast_optimizer(self, optimizer):\n self._optimizer = optimizer\n\n def broadcast(self):\n broadcast_parameters(self._model.state_dict(), root_rank=0)\n broadcast_optimizer_state(self._optimizer, root_rank=0)\n self.global_completed_batch_num = broadcast_object(\n self.global_completed_batch_num, name=\"GlobalCompletedBatchNum\"\n )\n\n def train_one_batch_with_retries(self, func, *args, **kwargs):\n self.reset_backward_passes_per_step()\n allreduce_success = False\n for _ in range(DEFAULT_MAX_ALLREDUCE_RETRY_NUM):\n try:\n self._broadcast_if_needed()\n result = func(*args, **kwargs)\n allreduce_success = True\n break\n except HorovodInternalError:\n logger.warning(\n \"Failed to perform allreduce operation on \"\n \"the gradients. Retrying...\"\n )\n # Those error message show that the communication\n # to merge gradient fails and we can rebuild the\n # communication.\n self.restore()\n except RuntimeError:\n traceback.print_exc()\n self.restore()\n if not allreduce_success:\n raise RuntimeError(\"Failed to perform allreduce.\")\n self._update_completed_minibatches()\n return result\n\n def restore(self):\n time.sleep(RETRY_ALLREDUCE_INTERVAL_SECS)\n # Call `load_state_dict` to reset the state of Horovod optimizer\n self._optimizer.load_state_dict(self._optimizer.state_dict())\n self._optimizer.zero_grad()\n self._rendezvous_manager.init_horovod_if_needed()\n\n def _update_completed_minibatches(self):\n if (\n hasattr(self._optimizer, \"fixed_global_batch_size\")\n and self._optimizer.fixed_global_batch_size\n ):\n if self._optimizer.update_gradients:\n self.global_completed_batch_num += (\n self.global_batch_num_per_step\n )\n else:\n self.global_completed_batch_num += hvd.size()\n\n def reset_backward_passes_per_step(self):\n # Only reset backward_passes_per_step when using the optimizer\n # with fixed_global_batch_size\n if (\n hasattr(self._optimizer, \"fixed_global_batch_size\")\n and self._optimizer.fixed_global_batch_size\n ):\n world_size = hvd.size()\n rank = hvd.rank()\n self.backward_passes_per_step = int(\n self.global_batch_num_per_step / world_size\n )\n if rank < self.global_batch_num_per_step % world_size:\n self.backward_passes_per_step += 1\n if (\n self.backward_passes_per_step\n != self._optimizer.backward_passes_per_step\n ):\n self._optimizer.set_backward_passes_per_step(\n self.backward_passes_per_step\n )\n logger.info(\n \"Backward passes per step = {}\".format(\n self._optimizer.backward_passes_per_step\n )\n )\n", "path": "elasticai_api/pytorch/controller.py"}, {"content": "# Copyright 2020 The ElasticDL Authors. 
All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport threading\nfrom collections import deque\n\nfrom elasticai_api.common.constants import TaskExecCounterKey\nfrom elasticai_api.common.master_client import build_master_client\nfrom elasticai_api.proto import elasticai_api_pb2\n\n\ndef build_data_shard_service(\n batch_size,\n num_epochs=None,\n dataset_size=None,\n task_type=elasticai_api_pb2.TRAINING,\n):\n master_client = build_master_client()\n return DataShardService(\n batch_size=batch_size,\n master_client=master_client,\n num_epochs=num_epochs,\n dataset_size=dataset_size,\n task_type=task_type,\n )\n\n\nclass DataShardService(object):\n def __init__(\n self,\n master_client,\n batch_size,\n num_epochs=None,\n dataset_size=None,\n task_type=elasticai_api_pb2.TRAINING,\n ):\n self._mc = master_client\n self._batch_size = batch_size\n self._num_epochs = num_epochs\n self._dataset_size = dataset_size\n self._task_type = task_type\n self._lock = threading.Lock()\n self._failed_record_count = 0\n self._reported_record_count = 0\n self._current_task = None\n self._pending_tasks = deque()\n self._report_training_params()\n\n def _report_training_params(self):\n if self._num_epochs and self._dataset_size:\n self._mc.report_training_params(\n self._batch_size, self._num_epochs, self._dataset_size\n )\n\n def get_current_task(self):\n return self._current_task\n\n def get_task(self, task_type=None):\n task = self._mc.get_task(task_type)\n if task.type == self._task_type:\n with self._lock:\n self._pending_tasks.append(task)\n if len(self._pending_tasks) == 1:\n self._current_task = task\n\n return task\n\n def _report_task(self, task, err_msg=\"\"):\n if self._failed_record_count != 0:\n exec_counters = {\n TaskExecCounterKey.FAIL_COUNT: self._failed_record_count\n }\n else:\n exec_counters = None\n self._mc.report_task_result(\n task.task_id, err_msg, exec_counters=exec_counters\n )\n\n def report_batch_done(self, batch_size=None, err_msg=\"\"):\n \"\"\"\n Report the number of records in the latest processed batch,\n so DynamicShardingManager knows if some pending tasks are finished\n and report_task_result to the master.\n Return True if there are some finished tasks, False otherwise.\n \"\"\"\n record_count = batch_size if batch_size else self._batch_size\n self._reported_record_count += record_count\n if err_msg:\n self._failed_record_count += record_count\n\n if not self._pending_tasks:\n return False\n task = self._pending_tasks[0]\n total_record_num = task.shard.end - task.shard.start\n if self._reported_record_count >= total_record_num:\n # Keep popping tasks until the reported record count is less\n # than the size of the current data since `batch_size` may be\n # larger than `shard.end - shard.start`\n with self._lock:\n while (\n self._pending_tasks\n and self._reported_record_count\n >= self._pending_tasks[0].shard.end\n - self._pending_tasks[0].shard.start\n ):\n self._reported_record_count -= (\n self._pending_tasks[0].shard.end\n - 
self._pending_tasks[0].shard.start\n )\n task = self._pending_tasks.popleft()\n self._report_task(task, err_msg)\n self._failed_record_count = 0\n if self._pending_tasks:\n self._current_task = self._pending_tasks[0]\n return True\n return False\n\n def fetch_shard(self):\n \"\"\"Fetch data shard and each shard contains the name,\n start and end index.\n \"\"\"\n task = self.get_task(self._task_type)\n if task.type != self._task_type:\n return None\n\n return task.shard\n", "path": "elasticai_api/common/data_shard_service.py"}]} | 3,958 | 987 |